text stringlengths 4 1.02M | meta dict |
|---|---|
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.day_v30_rc1 import DayV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.month_v30_rc1 import MonthV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.year_v30_rc1 import YearV30Rc1 # noqa: F401,E501
class FuzzyDateV30Rc1(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Swagger model for an ORCID "fuzzy" date: a required year plus an
    optional month and day.
    """
    # swagger_types: attribute name -> swagger type name
    swagger_types = {
        'year': 'YearV30Rc1',
        'month': 'MonthV30Rc1',
        'day': 'DayV30Rc1'
    }
    # attribute_map: attribute name -> key used in the JSON definition
    attribute_map = {
        'year': 'year',
        'month': 'month',
        'day': 'day'
    }

    def __init__(self, year=None, month=None, day=None):  # noqa: E501
        """FuzzyDateV30Rc1 - a model defined in Swagger"""  # noqa: E501
        self._year = None
        self._month = None
        self._day = None
        self.discriminator = None
        # year is mandatory: the property setter rejects None
        self.year = year
        if month is not None:
            self.month = month
        if day is not None:
            self.day = day

    @property
    def year(self):
        """The year of this FuzzyDateV30Rc1.

        :rtype: YearV30Rc1
        """
        return self._year

    @year.setter
    def year(self, year):
        """Set the year of this FuzzyDateV30Rc1 (required, non-None).

        :type: YearV30Rc1
        """
        if year is None:
            raise ValueError("Invalid value for `year`, must not be `None`")  # noqa: E501
        self._year = year

    @property
    def month(self):
        """The month of this FuzzyDateV30Rc1.

        :rtype: MonthV30Rc1
        """
        return self._month

    @month.setter
    def month(self, month):
        """Set the month of this FuzzyDateV30Rc1.

        :type: MonthV30Rc1
        """
        self._month = month

    @property
    def day(self):
        """The day of this FuzzyDateV30Rc1.

        :rtype: DayV30Rc1
        """
        return self._day

    @day.setter
    def day(self, day):
        """Set the day of this FuzzyDateV30Rc1.

        :type: DayV30Rc1
        """
        self._day = day

    def to_dict(self):
        """Return the model's properties as a plain dict."""

        def _plain(val):
            # recursively unwrap nested swagger models into primitives
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in val.items()}
            return val

        result = {attr: _plain(getattr(self, attr))
                  for attr in self.swagger_types}
        # generated boilerplate: merge mapping items when the model is a dict
        if issubclass(FuzzyDateV30Rc1, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal when all their attributes are equal."""
        return isinstance(other, FuzzyDateV30Rc1) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| {
"content_hash": "f69e2872e95b3ceef40ef70825815983",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 119,
"avg_line_length": 27.727272727272727,
"alnum_prop": 0.5512568306010929,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "cdc89683880351fbecc1d53b65479a1e7bbd26df",
"size": "4592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api_v3/models/fuzzy_date_v30_rc1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
} |
'''
2D Legendre Fitting
Based on Numpy legendre routines
'''
import numpy as np
from numpy.polynomial.legendre import legfit,legvander,legvander2d
from numpy.linalg import lstsq
import matplotlib.pyplot as p
import warnings
def leg2dfit(array, deg, rcond=None, full=False, w=None):
    '''
    2D Legendre Fitting.

    Least-squares fit of a 2D Legendre series of degree `deg` (in both
    dimensions) to `array`, sampled on a regular grid mapped onto
    [-1, 1] x [-1, 1].  NaN entries in `array` (and in `w`, when given)
    are excluded from the fit.

    - parameters:
        - array: 2D ndarray of samples to fit (may contain NaNs)
        - deg: non-negative series degree used in each dimension
        - rcond: cutoff for lstsq; defaults to len(x) * machine epsilon
        - full: if True, also return [residuals, rank, singular values, rcond]
        - w: optional 2D weight array, same shape as `array`
    - returns: coefficient vector c, plus the lstsq diagnostics when full=True

    NOTE: this routine has a known issue with mapping back off of the
    [-1, 1] interval; the issue lies in the normalization.
    '''
    # check arguments up front
    if deg < 0:
        raise ValueError("expected deg >= 0")
    order = int(deg) + 1
    flat_array = array.ravel()
    # keep only the finite samples.
    # BUG FIX: the original used np.where(np.isnan(...)) here, selecting the
    # NaN positions as the "good" points (the weighted branch below already
    # used the correct ~np.isnan mask).
    goodpts = np.where(~np.isnan(flat_array))
    if w is not None:
        if w.ndim != 2:
            raise TypeError("expected 2D array for w")
        if array.shape != w.shape:
            raise TypeError("expected array and w to have same shape")
        # a sample is usable only when both its datum and weight are finite
        goodpts = np.where(~np.isnan(flat_array + w.ravel()))
        w = w.ravel()[goodpts]
    # sample coordinates on [-1, 1]; axis order follows the original
    # convention (shape[0] along x, shape[1] along y)
    x, y = np.meshgrid(np.linspace(-1, 1, array.shape[0]),
                       np.linspace(-1, 1, array.shape[1]))
    x, y = x.ravel()[goodpts], y.ravel()[goodpts]
    # set up the least squares matrices in transposed form
    lhs = legvander2d(x, y, [deg, deg]).T
    rhs = flat_array[goodpts].T
    if w is not None:
        lhs = lhs * w
        rhs = rhs * w
    # set rcond
    if rcond is None:
        rcond = len(x) * np.finfo(x.dtype).eps
    # scale the design matrix columns to comparable norms before solving
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    # (removed stray debug `print scl`)
    c, resids, rank, s = lstsq(lhs.T / scl, rhs.T, rcond)
    c = (c.T / scl).T
    # warn on rank reduction
    if rank != order ** 2 and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg)
    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
def full_leg2dfit(array,deg,patch_size,pix_to_pc,full=True):
    # Slide an odd-sized patch over `array`, fit a 2D Legendre surface to each
    # patch, and collect gradient-related coefficient maps.
    # NOTE(review): `pad` and `padwithnans` are not defined or imported in this
    # module — this function cannot run as-is; confirm where they come from.
    # NOTE(review): `leg2dfit` below is called as (x, y, z, deg, full=True),
    # but this module's leg2dfit signature is (array, deg, ...); confirm which
    # version of leg2dfit this was written against.
    # NOTE(review): the `print` statements are Python 2 syntax.
    if len(deg)!=2: raise ValueError("deg must be in form [xdeg,ydeg]")
    if patch_size%2==0: raise ValueError("patch_size must be odd number")
    size_orig = array.shape
    patch_offset = int((patch_size-1)/2.)
    # Pad array with nans so edges can be properly fit (routine already ignores nans)
    arr = pad(array,patch_offset,padwithnans)
    # Add edges of array to padded regions
    arr[:patch_offset,patch_offset:arr.shape[1]-patch_offset] = array[:patch_offset,:]
    arr[arr.shape[0]-patch_offset:,patch_offset:arr.shape[1]-patch_offset] = array[array.shape[0]-patch_offset:,:]
    arr[patch_offset:arr.shape[0]-patch_offset,:patch_offset] = array[:,:patch_offset]
    arr[patch_offset:arr.shape[0]-patch_offset,arr.shape[1]-patch_offset:] = array[:,array.shape[1]-patch_offset:]
    size_new = arr.shape
    # Set up resulting arrays
    grad_x = np.zeros((size_orig))
    # grad_x_err = np.zeros((size_orig))
    grad_y = np.zeros((size_orig))
    grad_x_2 = np.zeros((size_orig))
    grad_y_2 = np.zeros((size_orig))
    len_x = patch_size
    len_y = patch_size
    x = np.linspace(-1,1,len_x)
    y = np.linspace(-1,1,len_y)
    meshx,meshy = np.meshgrid(x,y)
    for i in range(patch_offset+5,size_new[0]-(patch_offset+5)):
        for j in range(patch_offset+5,size_new[1]-(patch_offset+5)):
            # NOTE(review): the slices below span 2*patch_offset = patch_size-1
            # samples per axis, while meshx/meshy have patch_size per axis, so
            # `~missing` has fewer elements than meshx.ravel() — confirm the
            # intended slice is i-patch_offset:i+patch_offset+1.
            missing = np.isnan(arr[i-patch_offset:i+patch_offset,j-patch_offset:j+patch_offset].ravel())
            mask_arr = arr[i-patch_offset:i+patch_offset,j-patch_offset:j+patch_offset].ravel()[~missing]
            mask_meshx = meshx.ravel()[~missing]
            mask_meshy = meshy.ravel()[~missing]
            if len(mask_meshy)==0: #Has to hold for both ### For catching negligible regions before fitting routine
                # no usable samples in this patch: emit all-NaN coefficients
                fit = [np.empty((3,3)),np.NaN]
                fit[0][:,:] = np.NaN
                # fit[1][0][:,:] = np.NaN
                coef = np.reshape(fit[0],(deg[0]+1,deg[1]+1))
                # resid = np.reshape(fit[0],(deg[0]+1,deg[1]+1))
                #print "No points to fit :(%s,%s)" % (i,j)
            else:
                try:
                    #raise np.linalg.linalg.LinAlgError
                    fit = leg2dfit(mask_meshx,mask_meshy,mask_arr,deg,full=True)
                except np.linalg.linalg.LinAlgError:
                    # singular design matrix: fall back to NaN coefficients
                    print "Failed to fit :(%s,%s)" % (i,j)
                    fit = [np.empty((3,3)),np.NaN]
                    fit[0][:,:] = np.NaN
                print fit[1][0]
                coef = np.reshape(fit[0],(deg[0]+1,deg[1]+1))
                # resid = np.reshape(fit[1][0],(deg[0]+1,deg[1]+1))
                #print fit[0],coef
            # sanity check: the constant term should roughly match the pixel
            if coef[0,0]>1.2*arr[i,j] or coef[0,0]<0.8*arr[i,j]:
                coef = coef#np.tril(coef[::-1])[::-1]
                #print "Fit fairly off: (%s,%s)" % (i,j)
                # grad_x[i-patch_offset,j-patch_offset] = np.NaN
                # grad_y[i-patch_offset,j-patch_offset] = np.NaN
                # grad_x_2[i-patch_offset,j-patch_offset] = np.NaN
                # grad_y_2[i-patch_offset,j-patch_offset] = np.NaN
            else:
                coef = coef#np.tril(coef[::-1])[::-1]
            #fit_arr = leggrid2d(x,y,coef).T * ~np.isnan(arr)
            #Input fits into Result arrays
            # convert the linear coefficients to physical gradients per parsec
            grad_x[i-patch_offset,j-patch_offset] = coef[0,1] * (1/float(pix_to_pc)) * patch_offset**-1
            # grad_x_err[i-patch_offset,j-patch_offset] = resid[0,1] * (1/float(pix_to_pc)**2.) * patch_offset**-2
            grad_y[i-patch_offset,j-patch_offset] = coef[1,2] * (1/float(pix_to_pc)) * patch_offset**-1
            grad_x_2[i-patch_offset,j-patch_offset] = coef[0,2] #* pix_to_pc * patch_offset
            grad_y_2[i-patch_offset,j-patch_offset] = coef[2,2] #* pix_to_pc * patch_offset
    grad_mag = np.sqrt(grad_x**2. + grad_y**2.)
    grad_dirn = np.arctan2(grad_y,grad_x)
    # Return the fit array, coefficients of the fit, and the residuals etc. from lstsq
    return grad_x, grad_y, grad_mag, grad_dirn, grad_x_2, grad_y_2
def legfunc_transform(coef,plot=False):
    """
    Collapse a 2D Legendre coefficient array into per-degree magnitudes.

    For each total degree d, sums |coef| along the corresponding
    anti-diagonal of the coefficient matrix, normalizes by the number of
    terms at that degree, and optionally plots the profile.

    - parameters:
        - coef: 2D square array of Legendre coefficients
        - plot: if True, plot degree vs. normalized sum and show it
    - returns: list of normalized per-degree sums
      (BUG FIX: the original computed this list and then discarded it,
      returning None; returning it is backward compatible.)
    """
    xdeg = coef.shape[0]-1
    ydeg = coef.shape[1]-1
    if xdeg!=ydeg: raise TypeError("X and Y degrees must be the same.")
    coef_sq = abs(coef)
    # one normalization factor per degree: degree d contributes d+1 terms
    norm = range(1,xdeg+2)
    sum_coef = []
    # after flipping rows, each anti-diagonal groups the terms of one degree
    for i in range(-xdeg,1):
        sum_coef.append(np.trace(coef_sq[::-1],offset=i))
    sum_coef = [sum_coef[i]/norm[i] for i in range(len(sum_coef))]
    if plot:
        p.plot(range(xdeg+1),sum_coef)
        p.show()
    return sum_coef
| {
"content_hash": "40572a5eafb6bccce11b9fda37793c5e",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 112,
"avg_line_length": 37.71345029239766,
"alnum_prop": 0.6073809893006668,
"repo_name": "e-koch/ewky_scripts",
"id": "c9211c1341d632e1fb43bf3c9f432dbb932f6bc1",
"size": "6468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multidim_legendre.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "474102"
},
{
"name": "Python",
"bytes": "128329"
},
{
"name": "R",
"bytes": "1036"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals
import abc
import os
import re
import glob
import logging
import fnmatch
import json
import warnings
import six
from six.moves import zip
from monty.io import zopen
from pymatgen.io.vasp.inputs import Incar, Potcar, Poscar
from pymatgen.io.vasp.outputs import Vasprun, Oszicar, Dynmat
from pymatgen.io.gaussian import GaussianOutput
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from monty.json import MSONable
"""
This module define the various drones used to assimilate data.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 18, 2012"
logger = logging.getLogger(__name__)
class AbstractDrone(six.with_metaclass(abc.ABCMeta, MSONable)):
    """
    Abstract drone class that defines the various methods that must be
    implemented by drones. Because of the quirky nature of Python"s
    multiprocessing, the intermediate data representations has to be in the
    form of python primitives. So all objects that drones work with must be
    MSONable. All drones must also implement the standard MSONable as_dict() and
    from_dict API.
    """
    # six.with_metaclass applies the ABCMeta metaclass in a py2/py3-portable way
    @abc.abstractmethod
    def assimilate(self, path):
        """
        Assimilate data in a directory path into a pymatgen object. Because of
        the quirky nature of Python"s multiprocessing, the object must support
        pymatgen's as_dict() for parallel processing.
        Args:
            path: directory path
        Returns:
            An assimilated object
        """
        # abstract placeholder; concrete drones must override
        return
    @abc.abstractmethod
    def get_valid_paths(self, path):
        """
        Checks if path contains valid data for assimilation, and then returns
        the valid paths. The paths returned can be a list of directory or file
        paths, depending on what kind of data you are assimilating. For
        example, if you are assimilating VASP runs, you are only interested in
        directories containing vasprun.xml files. On the other hand, if you are
        interested converting all POSCARs in a directory tree to cifs for
        example, you will want the file paths.
        Args:
            path: input path as a tuple generated from os.walk, i.e.,
                (parent, subdirs, files).
        Returns:
            List of valid dir/file paths for assimilation
        """
        # abstract placeholder; concrete drones must override
        return
class VaspToComputedEntryDrone(AbstractDrone):
    """
    VaspToEntryDrone assimilates directories containing vasp output to
    ComputedEntry/ComputedStructureEntry objects. There are some restrictions
    on the valid directory structures:
    1. There can be only one vasp run in each directory.
    2. Directories designated "relax1", "relax2" are considered to be 2 parts
       of an aflow style run, and only "relax2" is parsed.
    3. The drone parses only the vasprun.xml file.
    Args:
        inc_structure (bool): Set to True if you want
            ComputedStructureEntries to be returned instead of
            ComputedEntries.
        parameters (list): Input parameters to include. It has to be one of
            the properties supported by the Vasprun object. See
            :class:`pymatgen.io.vasp.Vasprun`. If parameters is None,
            a default set of parameters that are necessary for typical
            post-processing will be set.
        data (list): Output data to include. Has to be one of the properties
            supported by the Vasprun object.
    """
    def __init__(self, inc_structure=False, parameters=None, data=None):
        self._inc_structure = inc_structure
        # parameters needed by typical post-processing; caller additions merge in
        self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
                            "potcar_symbols", "run_type"}
        if parameters:
            self._parameters.update(parameters)
        self._data = data if data else []
    def assimilate(self, path):
        """Parse the vasprun.xml in `path` into a ComputedEntry (or
        ComputedStructureEntry), returning None on any parsing failure."""
        files = os.listdir(path)
        # aflow-style two-step relaxation: only the final step is parsed
        if "relax1" in files and "relax2" in files:
            filepath = glob.glob(os.path.join(path, "relax2",
                                              "vasprun.xml*"))[0]
        else:
            vasprun_files = glob.glob(os.path.join(path, "vasprun.xml*"))
            filepath = None
            if len(vasprun_files) == 1:
                filepath = vasprun_files[0]
            elif len(vasprun_files) > 1:
                # Since multiple files are ambiguous, we will always read
                # the one that is the last one alphabetically.
                filepath = sorted(vasprun_files)[-1]
                warnings.warn("%d vasprun.xml.* found. %s is being parsed." %
                              (len(vasprun_files), filepath))
        try:
            # NOTE: filepath may still be None when no vasprun was found;
            # Vasprun(None) then raises and is handled below
            vasprun = Vasprun(filepath)
        except Exception as ex:
            logger.debug("error in {}: {}".format(filepath, ex))
            return None
        entry = vasprun.get_computed_entry(self._inc_structure,
                                           parameters=self._parameters,
                                           data=self._data)
        # attach any transformations.json history stored alongside the run
        entry.parameters["history"] = _get_transformation_history(path)
        return entry
    def get_valid_paths(self, path):
        """Return [parent] when the os.walk tuple `path` looks like a VASP
        run directory (or an aflow relax1/relax2 pair), else []."""
        (parent, subdirs, files) = path
        if "relax1" in subdirs and "relax2" in subdirs:
            return [parent]
        # skip the interior of aflow runs; accept dirs with a vasprun.xml or
        # with both a POSCAR and an OSZICAR
        if (not parent.endswith("/relax1")) and \
                (not parent.endswith("/relax2")) and (
                len(glob.glob(os.path.join(parent, "vasprun.xml*"))) > 0 or (
                len(glob.glob(os.path.join(parent, "POSCAR*"))) > 0 and
                len(glob.glob(os.path.join(parent, "OSZICAR*"))) > 0)
                ):
            return [parent]
        return []
    def __str__(self):
        return " VaspToComputedEntryDrone"
    def as_dict(self):
        """Serialize the drone's constructor arguments (MSONable API)."""
        return {"init_args": {"inc_structure": self._inc_structure,
                              "parameters": self._parameters,
                              "data": self._data},
                "version": __version__,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}
    @classmethod
    def from_dict(cls, d):
        """Rebuild the drone from its as_dict() representation."""
        return cls(**d["init_args"])
class SimpleVaspToComputedEntryDrone(VaspToComputedEntryDrone):
    """
    A simpler VaspToComputedEntryDrone. Instead of parsing vasprun.xml, it
    parses only the INCAR, POTCAR, OSZICAR and KPOINTS files, which are much
    smaller and faster to parse. However, much fewer properties are available
    compared to the standard VaspToComputedEntryDrone.
    Args:
        inc_structure (bool): Set to True if you want
            ComputedStructureEntries to be returned instead of
            ComputedEntries. Structure will be parsed from the CONTCAR.
    """
    def __init__(self, inc_structure=False):
        self._inc_structure = inc_structure
        # reduced parameter set compared to the parent drone (no vasprun.xml)
        self._parameters = {"is_hubbard", "hubbards", "potcar_spec",
                            "run_type"}
    def assimilate(self, path):
        """Build a ComputedEntry/ComputedStructureEntry from the raw VASP
        input/output files in `path`; returns None on any failure (the whole
        body is deliberately wrapped in a best-effort try/except)."""
        files = os.listdir(path)
        try:
            files_to_parse = {}
            # aflow-style run: inputs from relax1, final outputs from relax2
            if "relax1" in files and "relax2" in files:
                for filename in ("INCAR", "POTCAR", "POSCAR"):
                    search_str = os.path.join(path, "relax1", filename + "*")
                    files_to_parse[filename] = glob.glob(search_str)[0]
                for filename in ("CONTCAR", "OSZICAR"):
                    search_str = os.path.join(path, "relax2", filename + "*")
                    files_to_parse[filename] = glob.glob(search_str)[-1]
            else:
                for filename in (
                    "INCAR", "POTCAR", "CONTCAR", "OSZICAR", "POSCAR", "DYNMAT"
                ):
                    files = sorted(glob.glob(os.path.join(path, filename + "*")))
                    if len(files) < 1:
                        continue
                    if len(files) == 1 or filename == "INCAR" or \
                            filename == "POTCAR" or filename == "DYNMAT":
                        files_to_parse[filename] = files[-1]\
                            if filename == "POTCAR" else files[0]
                    elif len(files) > 1:
                        # Since multiple files are ambiguous, we will always
                        # use the first one for POSCAR and the last one
                        # alphabetically for CONTCAR and OSZICAR.
                        if filename == "POSCAR":
                            files_to_parse[filename] = files[0]
                        else:
                            files_to_parse[filename] = files[-1]
                        warnings.warn(
                            "%d files found. %s is being parsed." %
                            (len(files), files_to_parse[filename]))
            # parse whichever files were located; the rest stay None
            poscar, contcar, incar, potcar, oszicar, dynmat = [None]*6
            if 'POSCAR' in files_to_parse:
                poscar = Poscar.from_file(files_to_parse["POSCAR"])
            if 'CONTCAR' in files_to_parse:
                contcar = Poscar.from_file(files_to_parse["CONTCAR"])
            if 'INCAR' in files_to_parse:
                incar = Incar.from_file(files_to_parse["INCAR"])
            if 'POTCAR' in files_to_parse:
                potcar = Potcar.from_file(files_to_parse["POTCAR"])
            if 'OSZICAR' in files_to_parse:
                oszicar = Oszicar(files_to_parse["OSZICAR"])
            if 'DYNMAT' in files_to_parse:
                dynmat = Dynmat(files_to_parse["DYNMAT"])
            # derive Hubbard-U metadata from the INCAR + POSCAR, if present
            param = {"hubbards":{}}
            if poscar is not None and incar is not None and "LDAUU" in incar:
                param["hubbards"] = dict(zip(poscar.site_symbols,
                                             incar["LDAUU"]))
            param["is_hubbard"] = (
                incar.get("LDAU", False) and sum(param["hubbards"].values()) > 0
            ) if incar is not None else False
            param["run_type"] = None
            if incar is not None:
                # NOTE(review): assumes a GGA functional when no +U is set
                param["run_type"] = "GGA+U" if param["is_hubbard"] else "GGA"
            param["history"] = _get_transformation_history(path)
            param["potcar_spec"] = potcar.spec if potcar is not None else None
            # sentinel energy 1e10 marks a run with no OSZICAR
            energy = oszicar.final_energy if oszicar is not None else 1e10
            # prefer the relaxed structure (CONTCAR) over the input (POSCAR)
            structure = contcar.structure if contcar is not None\
                else poscar.structure
            initial_vol = poscar.structure.volume if poscar is not None else \
                None
            final_vol = contcar.structure.volume if contcar is not None else \
                None
            delta_volume = None
            if initial_vol is not None and final_vol is not None:
                delta_volume = (final_vol / initial_vol - 1)
            data = {"filename": path, "delta_volume": delta_volume}
            if dynmat is not None:
                data['phonon_frequencies'] = dynmat.get_phonon_frequencies()
            if self._inc_structure:
                entry = ComputedStructureEntry(
                    structure, energy, parameters=param, data=data
                )
            else:
                entry = ComputedEntry(
                    structure.composition, energy, parameters=param, data=data
                )
            return entry
        except Exception as ex:
            logger.debug("error in {}: {}".format(path, ex))
            return None
    def __str__(self):
        return "SimpleVaspToComputedEntryDrone"
    def as_dict(self):
        """Serialize the drone's constructor arguments (MSONable API)."""
        return {"init_args": {"inc_structure": self._inc_structure},
                "version": __version__, "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}
    @classmethod
    def from_dict(cls, d):
        """Rebuild the drone from its as_dict() representation."""
        return cls(**d["init_args"])
class GaussianToComputedEntryDrone(AbstractDrone):
    """
    Drone that assimilates directories of Gaussian output files into
    ComputedEntry/ComputedStructureEntry objects. By default, files with a
    ".log" extension are treated as Gaussian output.

    Args:
        inc_structure (bool): Set to True to get ComputedStructureEntries
            instead of ComputedEntries.
        parameters (list): Input parameters to include. Each must be a
            property supported by the GaussianOutput object (see
            :class:`pymatgen.io.gaussianio GaussianOutput`) and must be a
            python primitive (list, dict of strings and integers). If None,
            a default set of parameters is used.
        data (list): Output data to include, with the same constraints as
            `parameters`. If None, a default set is used.
        file_extensions (list):
            File extensions to be considered as Gaussian output files.
            Defaults to just the typical "log" extension.
    .. note::
        Like the GaussianOutput class, this is still in early beta.
    """
    def __init__(self, inc_structure=False, parameters=None, data=None,
                 file_extensions=(".log",)):
        self._inc_structure = inc_structure
        # always-collected input parameters; caller-supplied names merge in
        self._parameters = {"functional", "basis_set", "charge",
                            "spin_multiplicity", "route_parameters"}
        self._parameters.update(parameters or [])
        # always-collected output data; caller-supplied names merge in
        self._data = {"stationary_type", "properly_terminated"}
        self._data.update(data or [])
        self._file_extensions = file_extensions
    def assimilate(self, path):
        """Parse one Gaussian output file into an entry; None on failure."""
        try:
            run = GaussianOutput(path)
        except Exception as ex:
            logger.debug("error in {}: {}".format(path, ex))
            return None
        # pull the requested attributes straight off the parsed run
        param = {name: getattr(run, name) for name in self._parameters}
        data = {name: getattr(run, name) for name in self._data}
        if self._inc_structure:
            return ComputedStructureEntry(run.final_structure,
                                          run.final_energy,
                                          parameters=param,
                                          data=data)
        return ComputedEntry(run.final_structure.composition,
                             run.final_energy, parameters=param,
                             data=data)
    def get_valid_paths(self, path):
        """From an os.walk tuple, return the files with a matching extension."""
        parent, _, files = path
        matches = []
        for fname in files:
            if os.path.splitext(fname)[1] in self._file_extensions:
                matches.append(os.path.join(parent, fname))
        return matches
    def __str__(self):
        return " GaussianToComputedEntryDrone"
    def as_dict(self):
        """Serialize the drone's constructor arguments (MSONable API)."""
        init_args = {"inc_structure": self._inc_structure,
                     "parameters": self._parameters,
                     "data": self._data,
                     "file_extensions": self._file_extensions}
        return {"init_args": init_args,
                "version": __version__, "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}
    @classmethod
    def from_dict(cls, d):
        """Rebuild the drone from its as_dict() representation."""
        return cls(**d["init_args"])
def _get_transformation_history(path):
"""
Checks for a transformations.json* file and returns the history.
"""
trans_json = glob.glob(os.path.join(path, "transformations.json*"))
if trans_json:
try:
with zopen(trans_json[0]) as f:
return json.load(f)["history"]
except:
return None
return None
| {
"content_hash": "848612542085ebb9a6d698f7b762b2cd",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 81,
"avg_line_length": 39.95189873417721,
"alnum_prop": 0.5666941258475382,
"repo_name": "johnson1228/pymatgen",
"id": "c63c929c51c2e39706c3e84e41aa2986101a1501",
"size": "15891",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pymatgen/apps/borg/hive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5938"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "4886182"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6064350"
},
{
"name": "Roff",
"bytes": "868"
}
],
"symlink_target": ""
} |
"""This module contains a class for representing the gameboard."""
from random import randint
from collections import namedtuple
import json
import pandas as pd
from .constants import Constants as C
from .gameboard_delegate import GameBoardDelegate
from .combiners import combine_left, combine_right
from .combiners import combine_left_mapped, combine_right_mapped
# one history record for a gameboard: packed board, JSON board, score, move
GameBoardSnapshot = namedtuple(
    'GameBoardSnapshot',
    ['board_bits', 'board_json', 'score', 'move'],
)
class GameBoard(object):
    """
    This class models a 2048 game board with a table of tiles holding numbers.
    The board is modeled by a 64 bit integer and all tile, row, column lookups
    are performed using bit-shift operations.
    """
    # the board to put the tiles on in row-major ordering; this means the first
    # index refers to the row and the second index to the column.
    _board = 0
    # the score of the game
    _score = 0
    # the history of metadata throughout the game.
    # NOTE(review): this class-level list is mutable and would be shared by all
    # instances; __init__ -> reset() rebinds self._history so it is shadowed —
    # confirm no code path appends before reset() runs.
    _history = []
    # the delegate to send events to (a GameBoardDelegate, or None)
    _delegate = None
def __init__(self, delegate=None):
"""
Create a new gameboard.
- parameters:
- delegate: {GameBoardEventHandler} the delegate to pass events to.
"""
# reset the board (i.e. score and board set to 0)
self.reset()
# assign the delegate
if delegate and not isinstance(delegate, GameBoardDelegate):
msg = 'delegate must be of type None or GameBoardDelegate'
raise ValueError(msg)
self._delegate = delegate
@property
def copy(self):
"""Return a copy of this object."""
copy = GameBoard()
# copy the board from the other board, use the private setters, no
# need to run the verification logic as the board should always be
# in a valid state when copy is called and copy should be fast.
copy._board = self.board
# copy the score
copy._score = self.score
# setup the move map
# copy._move_map = self._move_map
return copy
def reset(self):
"""Reset the board and score."""
# reset the score
self._score = 0
# create the tile objects by iterating over the array
self._board = 0
# restore the history
self._history = []
@property
def history(self):
"""Return the history of this gameboard."""
return self._history
@property
def data_frame(self):
"""
Return the history as a `pandas` DataFrame.
- returns: {pandas.DataFrame} the history as a data frame
"""
columns = ['64-bit board', 'JSON board', 'score', 'move']
return pd.DataFrame.from_records(self._history, columns=columns)
@property
def board(self):
"""Return the board encoded as a 64 bit unsigned int."""
return self._board
    @property
    def board_json(self):
        """Return the board encoded into json, shaped as
        {"board": [[...], ...]} with one inner list per row."""
        rows = []
        # NOTE(review): the loop variable is named `column` but is passed to
        # get_row_array as a ROW index — correct only on square boards; confirm
        for column in range(0, C.columns()):
            rows.append(self.get_row_array(column))
        return json.dumps({'board': rows})
@board.setter
def board(self, new_board):
"""
Set the board to a new value.
- parameters:
- new_board: {long} the new board to set
"""
self._board = new_board
@property
def board_array(self):
"""Return the board as a 2D array of bytes."""
board = []
for row in range(0, C.rows()):
# add a column list to the array
board.append([])
for column in range(0, C.columns()):
# append the tile in the column to the array
board[row].append(self.get_tile(row, column))
return board
@property
def score(self):
"""Return the current score of the game."""
return self._score
@score.setter
def score(self, new_score):
"""
Set the score to a new value.
- parameters:
- new_score: {number} the new score for the game, must be positive.
"""
if new_score < 0:
raise ValueError('scores cannot be negative')
self._score = new_score
    def is_equal(self, other):
        """
        Compare this board's state against another gameboard.
        - parameters:
            - other: {GameBoard} the gameboard to compare against
        - returns: {boolean} False when both board and score match,
          True otherwise.
        NOTE(review): despite the name, this behaves as a "has changed"
        check — it returns False for EQUAL boards. Confirm callers rely on
        the inverted sense before renaming or fixing it.
        """
        # if the boards are the same, there was no change
        if self.board == other.board and self.score == other.score:
            return False
        return True
    def get_tile(self, row, column):
        """
        Get a tile at a given row column index.
        - parameters:
            - row: {int} the row to get the tile from
            - column: {int} the column to get the tile from
        - return: {int} the tile as a nibble (the tile's exponent value)
        """
        # verify the bound of the row
        if row < 0 or row >= C.rows():
            msg = 'row must be in [0, %i)'
            raise ValueError(msg % C.rows())
        # verify the bounds of the column
        if column < 0 or column >= C.columns():
            msg = 'column must be in [0, %i)'
            raise ValueError(msg % C.columns())
        # get the tiles unwrapped index
        # NOTE(review): uses C.rows() as the row stride; this is correct only
        # while the board is square (rows == columns) — confirm intent
        index = (row * C.rows()) + column
        return self.get_tile_at(index)
    def get_tile_at(self, index):
        """
        Get the tile at the given index in the bitmap.
        - parameters:
            - index: {number} the index of the tile to get
        - returns: {int} a nibble representing a single tiles exponent value
        """
        # verify the bound of the row
        if index < 0 or index >= C.tiles():
            msg = 'index must be in [0, %s)'
            raise ValueError(msg % C.tiles())
        # bit offset of the tile, counted from the high end of the board
        # NOTE(review): `C.rows() * index` stands in for `tile_size * index`;
        # presumably both are 4 — confirm before changing board dimensions
        row_offset = (C.rows() * index)
        offset = C.board_size() - C.tile_size() - row_offset
        # shift the target nibble down to the low bits and mask it out
        return (self._board >> offset) & C.tile_value_mask()
def place_tile(self, row, column, value):
"""
Place a tile at the given row column index with the given value.
- parameters:
- row: {int} the index of the row to place at
- column: {int} the index of the column to place at
- value: {int} the value to place in [0, 15]
"""
# verify the bounds of the row
if row < 0 or row >= C.rows():
msg = 'row must be in [0, %i)'
raise ValueError(msg % C.columns())
# verify the bounds of the column
if column < 0 or column >= C.columns():
msg = 'column must be in [0, %i)'
raise ValueError(msg % C.columns())
# get the index
index = (row * C.rows()) + column
# place the tile
self.place_tile_at(index, value)
    def place_tile_at(self, index, value):
        """
        Place a new tile at the unwrapped index.
        - parameters:
            - index: {int} the index to place the value at
            - value: {long} the value to place in the tile, in [0, 15]
        """
        # verify the bound of the index
        if index < 0 or index >= C.tiles():
            msg = 'tile must be in [0, %i)'
            raise ValueError(msg % C.tiles())
        if value < 0 or value > 15:
            raise ValueError('value must be in [0, 15]')
        # bit offset of the tile from the high end of the board
        # NOTE(review): like get_tile_at, relies on C.rows() equalling the
        # tile size in bits / the nibble stride — confirm
        offset = C.rows() * index
        # shift the zero to delete the old value
        self._board = self._board & ~(C.tile_mask() >> offset)
        # zero out garbage and shift value for placement, then add the new
        # value to the board
        # NOTE(review): `mask` here is actually a shift amount, not a bit mask
        mask = C.board_size() - C.tile_size() - offset
        masked_value = ((value & C.tile_value_mask()) << mask)
        self._board = self._board + masked_value
def place_random_tile(self):
"""Randomly place a new tile on the board."""
# make sure there is a place for the tile
if len(self.active_tiles) is C.tiles():
return
# get a list of the inactive tiles
inactive_tiles = self.inactive_tiles
value = 1 # 2 ^ 1 = 2
probability = randint(1, 1.0 / C.chance_of_four())
if probability is 1:
value = 2 # 2 ^ 2 = 4
# get a random index in the list
index = randint(0, len(inactive_tiles) - 1)
# place the tile on the board
self.place_tile_at(inactive_tiles[index], value)
# pass the event to the handler
if self._delegate is not None:
tile_index = inactive_tiles[index]
row = tile_index / C.rows()
column = tile_index % C.rows()
# self._delegate.didPlaceNewTile(row, column, value)
    def place_row(self, index, value):
        """
        Place a row onto the board.
        - parameters:
            - index: {int} the index of the row to replace
            - value: {short} the value of the row to put into the board
        """
        # cross out the row to make room for the new one; row 0 occupies the
        # highest 16 bits of the board
        self._board = self._board & ~(C.row_mask() >> int(C.row_size() * index))
        # cast the row and mask any sign extension, shift the row, then add the row to the board
        self._board = self._board + (((value) & C.short_mask()) << int((C.rows() - 1 - index) * C.row_size()))
    def get_row(self, index):
        """
        Return the encoded row value for a given index.
        - parameters:
            - index: {int} the index of the row to select [0, 3]
        - returns: {short} the row as 16 bits such that each 4 represents a
          tile
        """
        # row 0 lives in the highest 16 bits, so shift the board down by the
        # remaining rows and mask off everything above 16 bits
        shifted_board = self.board >> int((C.rows() - 1 - index) * C.row_size())
        return shifted_board & C.short_mask()
def place_row_array(self, index, row_array):
"""
Place a row array on the board.
- parameters:
- index: {int} the index of the row to place
- row_array: {byte[]} the list of tiles
"""
for column in range(0, C.columns()):
self.place_tile(index, column, row_array[column])
def get_row_array(self, row):
    """Return the tiles of *row* as a list of bytes, left to right.

    - parameters:
        - row: {int} the index of the row to get a tile array of
    - returns: {byte[]} an array of tiles
    """
    return [self.get_tile(row, column) for column in range(C.columns())]
def place_column(self, index, value):
    """
    Place a column on the board.
    - parameters:
        - index: {int} the index to place the column at
        - value: {short} the value of the column to place
    """
    # shift the mask to the given column, flip the bits, then & with the board to zero the
    # column.
    self._board = self._board & ~(C.column_mask() >> int(C.tile_size() * index))
    # first get the pieces of the board shifted into column positions, then shift the column
    # itself into place and add it to the board.
    self._board = self._board + (
        (
            # unpack each 4-bit tile from the 16-bit column value and re-space
            # it at the board's column stride (column_shift) as a long
            ( ((value >> C.tile_shift()[0]) & C.tile_value_mask()) << C.column_shift()[0] ) +
            ( ((value >> C.tile_shift()[1]) & C.tile_value_mask()) << C.column_shift()[1] ) +
            ( ((value >> C.tile_shift()[2]) & C.tile_value_mask()) << C.column_shift()[2] ) +
            ( value & C.tile_value_mask() )
            # then shift the re-spaced column into its final position
        ) << (C.rows() - 1 - index) * C.tile_size()
    )
def get_column(self, index):
    """Return the encoded column value for a given column.

    - parameters:
        - index: {int} the index of the column to select [0, 3]
    - returns: {short} the column as 16 bits such that each 4 represents a
      tile
    """
    shifts = C.tile_shift()
    encoded = 0
    # pack the top three tiles at their shift positions...
    for row in range(3):
        encoded += self.get_tile(row, index) << shifts[row]
    # ...and the bottom tile in the low bits
    return encoded + self.get_tile(3, index)
def place_column_array(self, index, column_array):
    """Write a list of tile values into column *index*, one row at a time.

    - parameters:
        - index: {int} the index of the column to place
        - column_array: {byte[]} the list of tiles
    """
    for row in range(C.rows()):
        self.place_tile(row, index, column_array[row])
def get_column_array(self, column):
    """Return the tiles of *column* as a list of bytes, top to bottom.

    - parameters:
        - column: {int} the index of the column to get a tile array of
    - returns: {byte[]} an array of tiles
    """
    return [self.get_tile(row, column) for row in range(C.rows())]
@property
def inactive_tiles(self):
    """Return the flat indexes of the empty (value 0) tiles.

    - returns: {int[]} indexes in [0, C.tiles()) whose tile value is 0
    """
    # bug fix: compare with ==, not `is` — identity on ints only works by
    # accident of CPython's small-int interning
    return [index for index in range(C.tiles())
            if self.get_tile_at(index) == 0]
@property
def active_tiles(self):
    """Return the flat indexes of the occupied (non-zero) tiles.

    - returns: {int[]} indexes in [0, C.tiles()) whose tile value is not 0
    """
    # bug fix: compare with !=, not `is not` — identity on ints only works
    # by accident of CPython's small-int interning
    return [index for index in range(C.tiles())
            if self.get_tile_at(index) != 0]
# MARK: Movement

def _move(self, length, combine, get, place):
    """Apply a combine function across every row / column of the board.

    - parameters:
        - length: {int} the number of rows / cols to perform a move action on
        - combine: {function} returns the combined line and its score
          as a tuple
        - get: {function} the function that will return a row / col
        - place: {function} the function that will place a new row / col
    """
    for index in range(length):
        new_line, points = combine(get(index))
        place(index, new_line)
        self._score += points
def _move_up(self):
    """Shift and merge tiles upward, adding merge points to the score.

    - precondition: assume the move is legal
    """
    self._move(C.rows(), combine_left, self.get_column_array,
               self.place_column_array)
def _move_down(self):
    """Shift and merge tiles downward, adding merge points to the score.

    - precondition: assume the move is legal
    """
    self._move(C.rows(), combine_right, self.get_column_array,
               self.place_column_array)
def _move_left(self):
    """Shift and merge tiles leftward, adding merge points to the score.

    - precondition: assume the move is legal
    """
    self._move(C.columns(), combine_left, self.get_row_array,
               self.place_row_array)
def _move_right(self):
    """Shift and merge tiles rightward, adding merge points to the score.

    - precondition: assume the move is legal
    """
    self._move(C.columns(), combine_right, self.get_row_array,
               self.place_row_array)
# MARK: Can move

@property
def can_move_up(self):
    """Return a boolean determining whether a move up is possible."""
    # NOTE(review): this returns is_equal(moved_copy), i.e. it is True when
    # the move changed *nothing* — expected the inverse, unless is_equal()
    # actually tests inequality. Confirm is_equal's semantics.
    copy = self.copy
    copy._move_up()
    return self.is_equal(copy)

@property
def can_move_down(self):
    """Return a boolean determining whether a move down is possible."""
    # NOTE(review): same is_equal polarity concern as can_move_up above.
    copy = self.copy
    copy._move_down()
    return self.is_equal(copy)

@property
def can_move_left(self):
    """Return a boolean determining whether a move left is possible."""
    # NOTE(review): same is_equal polarity concern as can_move_up above.
    copy = self.copy
    copy._move_left()
    return self.is_equal(copy)

@property
def can_move_right(self):
    """Return a boolean determining whether a move right is possible."""
    # NOTE(review): same is_equal polarity concern as can_move_up above.
    copy = self.copy
    copy._move_right()
    return self.is_equal(copy)

@property
def can_move(self):
    """Return a boolean determining if there are any moves on the board."""
    return self.can_move_up or self.can_move_down or self.can_move_left or self.can_move_right
# Guarded Movement

def _try_move(self, can_move, move, did_move=None):
    """Perform *move* when *can_move* is truthy and snapshot the result.

    - parameters:
        - can_move: {boolean} whether the move is legal
        - move: {function} the bound move method to invoke
        - did_move: {function} optional delegate callback
    - returns: {boolean} True if the move occurred, False otherwise
    """
    if can_move:
        # get the matrix of moves
        # int[][] moveMatrix = LikeValueIndexMapper.getUpMovementMatrixFor(self)
        # store the old score (for the handler)
        old_score = self.score
        # make the move
        move()
        # take a snapshot of the board/score/move name for the history log
        snapshot = GameBoardSnapshot(board_bits=self._board,
                                     board_json=self.board_json,
                                     score=self._score,
                                     move=str(move.__name__))
        self._history.append(snapshot)
        # if there is a handler, pass the event
        if did_move:
            # NOTE(review): did_score is computed but the callback below is
            # commented out, so did_move currently has no visible effect.
            did_score = self.score > old_score
            # did_move(did_score, moveMatrix)
        return True
    return False
def move_up(self):
    """Attempt an upward move.

    Leaves the board untouched and returns False when the move is
    illegal; otherwise performs it and returns True.

    - returns: {boolean} true if the move occured, false otherwise
    """
    return self._try_move(self.can_move_up, self._move_up)
def move_down(self):
    """Attempt a downward move.

    Leaves the board untouched and returns False when the move is
    illegal; otherwise performs it and returns True.

    - returns: {boolean} true if the move occured, false otherwise
    """
    return self._try_move(self.can_move_down, self._move_down)
def move_left(self):
    """Attempt a leftward move.

    Leaves the board untouched and returns False when the move is
    illegal; otherwise performs it and returns True.

    - returns: {boolean} true if the move occured, false otherwise
    """
    return self._try_move(self.can_move_left, self._move_left)
def move_right(self):
    """Attempt a rightward move.

    Leaves the board untouched and returns False when the move is
    illegal; otherwise performs it and returns True.

    - returns: {boolean} true if the move occured, false otherwise
    """
    return self._try_move(self.can_move_right, self._move_right)
# MARK: Description

@property
def description(self):
    """Return a human readable version of this gameboard.

    Each tile renders as a fixed-width cell; empty tiles render blank,
    occupied tiles render 2**value. Rows are separated by newlines.
    """
    description = ""
    # iterate over all the tiles
    for i in range(C.tiles()):
        # get the tile at this index
        value = self.get_tile_at(i)
        # bug fix: compare ints with ==, not `is`
        if value == 0:
            tile = '|{0: >6} |'.format('')
        else:
            tile = '|{0: >6} |'.format(2**value)
        # add the tile to the string
        description += tile
        # if the index is about to be a multiple of four, add a new line
        if (i + 1) % 4 == 0:
            description += "\n"
    return description
def __repr__(self):
    """Return the printable board description for this object."""
    return self.description
| {
"content_hash": "70a856d16a44ecf8375bbb6fb8384f36",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 110,
"avg_line_length": 34.88986013986014,
"alnum_prop": 0.5556446359673297,
"repo_name": "Kautenja/tfe_engine",
"id": "5315073df1ec344a7fc4de40785608e3edc08741",
"size": "19957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfe_engine/_gameboard/gameboard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "121613"
}
],
"symlink_target": ""
} |
import logging
from typing import Text
from rasa.shared.exceptions import RasaException
logger = logging.getLogger(__name__)
# TODO: remove/move
class InvalidModelError(RasaException):
    """Raised when a model failed to load.

    Attributes:
        message -- explanation of why the model is invalid
    """

    def __init__(self, message: Text) -> None:
        """Initialize message attribute."""
        self.message = message
        # py3-style zero-argument super — identical behavior
        super().__init__(message)

    def __str__(self) -> Text:
        """Return the human-readable explanation."""
        return self.message
| {
"content_hash": "20ab4276588892ea867971659d850fcd",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 58,
"avg_line_length": 23.208333333333332,
"alnum_prop": 0.6588868940754039,
"repo_name": "RasaHQ/rasa_nlu",
"id": "52832731f56176fb9e353b4cfb1e21dc708caa30",
"size": "557",
"binary": false,
"copies": "1",
"ref": "refs/heads/emptystring_10504",
"path": "rasa/nlu/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "3462"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "1467067"
},
{
"name": "Shell",
"bytes": "941"
}
],
"symlink_target": ""
} |
import os
import sys
import functools
import resource
STATUS = 0
def error_unless_permitted(env_var, message):
    """Write *message* to stderr and flag failure unless *env_var* is set.

    Setting the named environment variable (to any non-empty value)
    suppresses the warning and leaves the exit status untouched.
    """
    global STATUS
    if os.getenv(env_var):
        return
    sys.stderr.write(message)
    STATUS = 1
def only_on(platforms):
    """Decorator: run the wrapped function only on the given platforms.

    - parameters:
        - platforms: iterable of sys.platform prefixes (e.g. 'linux')
    The wrapper is a no-op (returning None) on any other platform.
    """
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            # bug fix: propagate the wrapped function's return value
            # (previously discarded) and forward any arguments
            if any(sys.platform.startswith(p) for p in platforms):
                return func(*args, **kwargs)
        return inner
    return decorator
@only_on(('linux', 'darwin', 'freebsd', 'openbsd'))
def check_rlimit_core():
    """Warn (and set a failing status) when core dumps are enabled.

    A nonzero soft RLIMIT_CORE means the many intentionally-segfaulted
    test processes would each drop a potentially large core file.
    """
    # hard limit is unused; only the soft limit governs core dump size
    soft, hard = resource.getrlimit(resource.RLIMIT_CORE)
    if soft > 0:
        error_unless_permitted('ALLOW_NONZERO_RLIMIT_CORE', """\
RLIMIT_CORE is set to a nonzero value (%d). During debuginfo, the test suite
will segfault many rustc's, creating many potentially large core files.
set ALLOW_NONZERO_RLIMIT_CORE to ignore this warning
""" % (soft))
def main():
    """Run every registered sanity check."""
    check_rlimit_core()
if __name__ == '__main__':
    main()
    # exit nonzero when any check wrote a warning without its override
    # environment variable being set
    sys.exit(STATUS)
| {
"content_hash": "ed85f4ebbccb5e781cdb5d3deff49212",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 23.595238095238095,
"alnum_prop": 0.644803229061554,
"repo_name": "miniupnp/rust",
"id": "0d9c430ec3acd9c5fcb3f7c96da0886bf8c740fc",
"size": "1478",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/etc/check-sanitycheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "4990"
},
{
"name": "Assembly",
"bytes": "20064"
},
{
"name": "Awk",
"bytes": "159"
},
{
"name": "C",
"bytes": "725379"
},
{
"name": "C++",
"bytes": "55803"
},
{
"name": "CSS",
"bytes": "22489"
},
{
"name": "HTML",
"bytes": "552"
},
{
"name": "JavaScript",
"bytes": "36150"
},
{
"name": "LLVM",
"bytes": "1587"
},
{
"name": "Lex",
"bytes": "9238"
},
{
"name": "Makefile",
"bytes": "227377"
},
{
"name": "PHP",
"bytes": "265"
},
{
"name": "Pascal",
"bytes": "13535"
},
{
"name": "Puppet",
"bytes": "2765"
},
{
"name": "Python",
"bytes": "142548"
},
{
"name": "Rust",
"bytes": "19242730"
},
{
"name": "Shell",
"bytes": "269546"
},
{
"name": "TeX",
"bytes": "57"
},
{
"name": "Yacc",
"bytes": "78848"
}
],
"symlink_target": ""
} |
"""
Support for interacting with and controlling the cmus music player.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.cmus/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_SET,
MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_PAUSED,
STATE_PLAYING)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pycmus==0.1.1']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'cmus'
DEFAULT_PORT = 3000
SUPPORT_CMUS = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_TURN_OFF | \
SUPPORT_TURN_ON | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_PLAY_MEDIA | SUPPORT_SEEK | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Inclusive(CONF_HOST, 'remote'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'remote'): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_entities, discover_info=None):
    """Set up the CMUS platform from the validated config entry."""
    from pycmus import exceptions

    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    password = config.get(CONF_PASSWORD)
    name = config.get(CONF_NAME)

    try:
        device = CmusDevice(host, password, port, name)
    except exceptions.InvalidPassword:
        _LOGGER.error("The provided password was rejected by cmus")
        return False
    add_entities([device], True)
class CmusDevice(MediaPlayerDevice):
    """Representation of a running cmus."""

    # pylint: disable=no-member
    def __init__(self, server, password, port, name):
        """Initialize the CMUS device."""
        from pycmus import remote

        # connect over the network when a server is given, otherwise attach
        # to a cmus instance on the local machine
        if server:
            self.cmus = remote.PyCmus(
                server=server, password=password, port=port)
            auto_name = 'cmus-{}'.format(server)
        else:
            self.cmus = remote.PyCmus()
            auto_name = 'cmus-local'
        self._name = name or auto_name
        # most recent status dict from cmus; refreshed by update()
        self.status = {}

    def update(self):
        """Get the latest data and update the state."""
        status = self.cmus.get_status_dict()
        if not status:
            _LOGGER.warning("Received no status from cmus")
        else:
            self.status = status

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the media state."""
        if self.status.get('status') == 'playing':
            return STATE_PLAYING
        if self.status.get('status') == 'paused':
            return STATE_PAUSED
        return STATE_OFF

    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        return self.status.get('file')

    @property
    def content_type(self):
        """Content type of the current playing media."""
        # NOTE(review): Home Assistant's media player entity model exposes
        # this as `media_content_type`; confirm this name is intentional.
        return MEDIA_TYPE_MUSIC

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        return self.status.get('duration')

    @property
    def media_title(self):
        """Title of current playing media."""
        return self.status['tag'].get('title')

    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        return self.status['tag'].get('artist')

    @property
    def media_track(self):
        """Track number of current playing media, music track only."""
        return self.status['tag'].get('tracknumber')

    @property
    def media_album_name(self):
        """Album name of current playing media, music track only."""
        return self.status['tag'].get('album')

    @property
    def media_album_artist(self):
        """Album artist of current playing media, music track only."""
        return self.status['tag'].get('albumartist')

    @property
    def volume_level(self):
        """Return the volume level."""
        # NOTE(review): if vol_left/vol_right are strings, [0] takes only
        # the first character (e.g. '75' -> '7') — confirm pycmus types.
        left = self.status['set'].get('vol_left')[0]
        right = self.status['set'].get('vol_right')[0]
        if left != right:
            volume = float(left + right) / 2
        else:
            volume = left
        return int(volume)/100

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_CMUS

    def turn_off(self):
        """Service to send the CMUS the command to stop playing."""
        self.cmus.player_stop()

    def turn_on(self):
        """Service to send the CMUS the command to start playing."""
        self.cmus.player_play()

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self.cmus.set_volume(int(volume * 100))

    def volume_up(self):
        """Set the volume up."""
        left = self.status['set'].get('vol_left')
        right = self.status['set'].get('vol_right')
        if left != right:
            # NOTE(review): if these are strings, `left + right` is
            # concatenation ('50' + '70' -> '5070'), not addition — verify
            # the value types returned by pycmus.
            current_volume = float(left + right) / 2
        else:
            current_volume = left
        if current_volume <= 100:
            self.cmus.set_volume(int(current_volume) + 5)

    def volume_down(self):
        """Set the volume down."""
        left = self.status['set'].get('vol_left')
        right = self.status['set'].get('vol_right')
        if left != right:
            # NOTE(review): same string-concatenation concern as volume_up.
            current_volume = float(left + right) / 2
        else:
            current_volume = left
        if current_volume <= 100:
            self.cmus.set_volume(int(current_volume) - 5)

    def play_media(self, media_type, media_id, **kwargs):
        """Send the play command."""
        if media_type in [MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST]:
            self.cmus.player_play_file(media_id)
        else:
            _LOGGER.error(
                "Invalid media type %s. Only %s and %s are supported",
                media_type, MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST)

    def media_pause(self):
        """Send the pause command."""
        self.cmus.player_pause()

    def media_next_track(self):
        """Send next track command."""
        self.cmus.player_next()

    def media_previous_track(self):
        """Send next track command."""
        self.cmus.player_prev()

    def media_seek(self, position):
        """Send seek command."""
        self.cmus.seek(position)

    def media_play(self):
        """Send the play command."""
        self.cmus.player_play()

    def media_stop(self):
        """Send the stop command."""
        # NOTE(review): other handlers call self.cmus.player_stop(); confirm
        # pycmus actually provides a bare stop() method.
        self.cmus.stop()
| {
"content_hash": "521668bf845166345fc42edcb6a9238d",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 79,
"avg_line_length": 31.172727272727272,
"alnum_prop": 0.6086322543015457,
"repo_name": "tinloaf/home-assistant",
"id": "2711ac1ff11c8560f8e070d223a7eb8a27fae1a3",
"size": "6858",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/cmus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template
from flask_static_digest import FlaskStaticDigest
import argparse
from sqlalchemy import create_engine, MetaData
import config.aggregate
import config.config
# import config.ingest
import config.query
flask_static_digest = FlaskStaticDigest()
def create_app():
    """Application factory: build and configure the Flask app.

    Loads settings from config.settings, initialises static-digest
    support, and registers the index route.
    """
    app = Flask(__name__)
    app.config.from_object("config.settings")
    flask_static_digest.init_app(app)

    @app.route("/")
    def index():
        # serve the single-page application shell
        return render_template("index.html")

    return app
def refine(config, args):
    """Return an Aggregator restricted by the command-line selections.

    - parameters:
        - config: parsed system configuration (aggregations, filters, ...)
        - args: argparse namespace with --only-compute / --only-bins /
          --only-statistics (each None means "keep everything")

    NOTE(review): this module imports ``config.aggregate`` at the top but
    references ``piecewise.aggregate`` here — confirm the intended package.
    """
    modified_aggregations = []
    for agg in config.aggregations:
        # idiom fix: `x not in y` instead of `not x in y`
        if args.only_compute is not None and agg.name not in args.only_compute:
            continue
        # keep only the requested bin dimensions (all by default)
        modified_bins = [b for b in agg.bins
                         if args.only_bins is None or b.label in args.only_bins]
        # keep only the requested statistics (all by default)
        modified_stats = [s for s in agg.statistics
                          if args.only_statistics is None
                          or s.label in args.only_statistics]
        modified_agg = piecewise.aggregate.Aggregation(
            name=agg.name,
            statistics_table_name=agg.statistics_table_name,
            bins=modified_bins,
            statistics=modified_stats)
        modified_aggregations.append(modified_agg)
    return piecewise.aggregate.Aggregator(
        database_uri=config.database_uri,
        cache_table_name=config.cache_table_name,
        filters=config.filters,
        aggregations=modified_aggregations)
def do_ingest(args):
    """Pull data from BigQuery into the postgres cache table.

    With --debug, print the BigQuery SQL instead of executing it.

    NOTE(review): references ``piecewise.*`` though only ``config.*`` is
    imported at the top of this file — confirm the package name.
    """
    config = piecewise.config.read_system_config()
    config = refine(config, args)
    if not args.debug:
        piecewise.ingest.ingest(config)
    else:
        print ("Displaying bigquery SQL instead of performing query")
        print (config.ingest_bigquery_query())
def do_aggregate(args):
    """Compute statistics tables from previously ingested data.

    With --debug, display the Postgres SQL instead of executing it.

    NOTE(review): references ``piecewise.*`` though only ``config.*`` is
    imported at the top of this file — confirm the package name.
    """
    config = piecewise.config.read_system_config()
    config = refine(config, args)
    if not args.debug:
        piecewise.aggregate.aggregate(config)
    else:
        print ("Displaying Postgres SQL instead of performing query")
        piecewise.aggregate.aggregate(config, args.debug)
def do_query(args):
    """Query a statistics table and print (or, with --debug, display) SQL.

    NOTE(review): references ``piecewise.*`` though only ``config.*`` is
    imported at the top of this file — confirm the package name.
    """
    from piecewise.aggregate import AverageRTT
    config = piecewise.config.read_system_config()
    config = refine(config, args)
    # find the aggregation named on the command line (None if no match)
    aggregation = None
    for agg in config.aggregations:
        if agg.name == args.aggregation:
            aggregation = agg
    # statistics / bins / filters default to the aggregation's own settings
    if args.stats is not None:
        statistics = [piecewise.config.known_statistics[s] for s in args.stats]
    else:
        statistics = aggregation.statistics
    if args.bins is not None:
        bins = args.bins
    else:
        bins = dict()
    if args.filters is not None:
        filters = args.filters
    else:
        filters = dict()
    if not args.debug:
        # NOTE(review): `name` is not defined in this scope — presumably
        # this should be args.aggregation; confirm before relying on it.
        results = piecewise.query.query(config, name, statistics, bins, filters)
        for row in results:
            print (row)
    else:
        engine = create_engine(config.database_uri)
        metadata = MetaData()
        engine.metadata = metadata
        selection = aggregation.selection(engine, metadata, bins, filters, statistics)
        print (selection.compile(engine))
def do_load(args):
    """Convenience command: ingest raw data, then compute aggregates."""
    do_ingest(args)
    do_aggregate(args)
def do_display_config(args):
    """Print the parsed system configuration in a readable outline."""
    config = piecewise.config.read_system_config()
    config = refine(config, args)
    print ('Postgres connection: {}'.format(config.database_uri))
    print ('Results cache table: {}'.format(config.cache_table_name))
    print ('Filters:')
    for filt in config.filters:
        print ('\t{}'.format(filt))
    # bug fix: a bare `print` is a no-op expression in Python 3; call it
    # to emit the intended blank separator line
    print ()
    print ('Aggregations:')
    for agg in config.aggregations:
        print ('\t{}'.format(agg.name))
        print ('\t* Bin dimensions')
        for b in agg.bins:
            print ('\t\t{}: {}'.format(b.label, b))
        print ('\t* Aggregate statistics')
        for s in agg.statistics:
            print ('\t\t{}'.format(s))
def add_ingest_args(parser):
    """Attach ingest-specific CLI options (none currently defined)."""
def add_aggregate_args(parser):
    """Attach aggregate-specific CLI options (none currently defined)."""
def split_string(string):
    """Split a comma-separated option value into a list of strings."""
    return string.split(",")
def colon_dict(string):
    """Parse ``k:v,k2,...`` into a dict; items without ``:`` map to ''."""
    result = {}
    for item in string.split(','):
        # partition splits on the first ':' only; no ':' yields value ''
        key, _, value = item.partition(':')
        result[key] = value
    return result
if __name__ == '__main__':
    # top-level CLI: the global --only-* flags narrow which aggregations,
    # bins, and statistics any subcommand operates on (see refine())
    parser = argparse.ArgumentParser(prog="piecewise", description="Download and aggregate m-lab internet performance data")
    parser.add_argument("--debug", action='store_true', help = 'Display rather than execute queries')
    parser.add_argument("--only-compute", type=split_string, help='Use only the named aggregations for this run')
    parser.add_argument("--only-bins", type=split_string, help='Use only the named bin dimensions for this run')
    parser.add_argument("--only-statistics", type=split_string, help='Use only the named statistics for this run')
    subparsers = parser.add_subparsers(help="Operation")
    # ingest: BigQuery -> postgres cache table
    ingest_parser = subparsers.add_parser('ingest', help='Pull data from BigQuery into postgres database')
    add_ingest_args(ingest_parser)
    ingest_parser.set_defaults(func=do_ingest)
    # aggregate: compute statistics tables from ingested data
    aggregate_parser = subparsers.add_parser('aggregate', help='Compute statistics from ingested internet performance data')
    add_aggregate_args(aggregate_parser)
    aggregate_parser.set_defaults(func=do_aggregate)
    display_config_parser = subparsers.add_parser("display-config", help='Display parsed configuration')
    display_config_parser.set_defaults(func=do_display_config)
    query_parser = subparsers.add_parser("query", help='Query statistics tables')
    query_parser.add_argument("-b", "--bins", help="Select and configure bins for query", type=colon_dict)
    query_parser.add_argument("-s", "--stats", help="Select statistics for query", type=split_string)
    query_parser.add_argument("-f", "--filters", help="Select and configure filters for query", type=colon_dict)
    query_parser.add_argument("aggregation", help="Select aggregation for query")
    query_parser.set_defaults(func=do_query)
    # load: ingest followed by aggregate
    load_parser = subparsers.add_parser('load', help='Ingest and aggregate data in one run')
    add_ingest_args(load_parser)
    add_aggregate_args(load_parser)
    load_parser.set_defaults(func=do_load)
    args = parser.parse_args()
    # subparsers are optional in py3: a missing subcommand leaves args.func
    # unset, so reproduce py2's "too few arguments" error explicitly
    try:
        func = args.func
    except AttributeError:
        parser.error("too few arguments")
    func(args)
| {
"content_hash": "41faed95652c42ecfa25de6081584345",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 124,
"avg_line_length": 34.25130890052356,
"alnum_prop": 0.6566799143992663,
"repo_name": "critzo/piecewise",
"id": "eedcdf09a9d51183da8ba115c9a2e8a2f9184a70",
"size": "6542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57288"
},
{
"name": "Dockerfile",
"bytes": "996"
},
{
"name": "HTML",
"bytes": "66575"
},
{
"name": "JavaScript",
"bytes": "66481"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PLpgSQL",
"bytes": "1075"
},
{
"name": "Python",
"bytes": "72962"
},
{
"name": "Shell",
"bytes": "899"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import Gaffer
import GafferUI
## \todo Maths expressions to modify the existing value
## \todo Enter names of other plugs to create a connection
## \todo Color change for connected plugs and output plugs
## \todo Reject drag and drop of anything that's not a number
class NumericPlugValueWidget( GafferUI.PlugValueWidget ) :
    """PlugValueWidget for numeric plugs, backed by a GafferUI.NumericWidget.

    Merges rapid keyboard edits into single undo steps and keeps the
    widget in sync with the plug (including clamping done by the plug).
    """

    def __init__( self, plug, **kw ) :

        self.__numericWidget = GafferUI.NumericWidget( 0 )

        GafferUI.PlugValueWidget.__init__( self, self.__numericWidget, plug, **kw )

        self._addPopupMenu( self.__numericWidget )

        # we use these to decide which actions to merge into a single undo
        self.__lastChangedReason = None
        self.__mergeGroupId = 0

        self.__keyPressConnection = self.__numericWidget.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )
        self.__valueChangedConnection = self.__numericWidget.valueChangedSignal().connect( Gaffer.WeakMethod( self.__valueChanged ) )

        self._updateFromPlug()
        self.__updateWidth()

    def setPlug( self, plug ) :
        """Reassign the plug and resize the widget for its value range."""
        GafferUI.PlugValueWidget.setPlug( self, plug )
        self.__updateWidth()

    def numericWidget( self ) :
        """Return the underlying NumericWidget."""
        return self.__numericWidget

    def setHighlighted( self, highlighted ) :
        """Forward highlight state to the numeric widget as well."""
        GafferUI.PlugValueWidget.setHighlighted( self, highlighted )
        self.numericWidget().setHighlighted( highlighted )

    def getToolTip( self ) :
        """Append the increment/decrement hint to the base tooltip."""
        result = GafferUI.PlugValueWidget.getToolTip( self )

        if self.getPlug() is not None :
            result += "<ul>"
            result += "<li>Cursor up/down to increment/decrement</li>"
            # bug fix : close the list we opened above (was "<ul>" again,
            # leaving the tooltip HTML unbalanced)
            result += "</ul>"

        return result

    def _updateFromPlug( self ) :
        """Mirror the plug's current value and editability into the widget."""
        plug = self.getPlug()
        if plug is not None :
            with self.getContext() :
                # block our own handler so this programmatic set doesn't
                # re-enter __valueChanged
                with Gaffer.BlockedConnection( self.__valueChangedConnection ) :
                    self.__numericWidget.setValue( plug.getValue() )

        self.__numericWidget.setEditable( self._editable() )

    def __keyPress( self, widget, event ) :

        assert( widget is self.__numericWidget )

        if not self.__numericWidget.getEditable() :
            return False

        # escape abandons everything
        if event.key=="Escape" :
            self._updateFromPlug()
            return True

        return False

    def __valueChanged( self, widget, reason ) :
        # push widget edits to the plug, merging consecutive edits of the
        # same kind into one undo group
        if self._editable() :
            if not widget.changesShouldBeMerged( self.__lastChangedReason, reason ) :
                self.__mergeGroupId += 1
            self.__lastChangedReason = reason
            self.__setPlugValue( mergeGroup = "NumericPlugValueWidget%d%d" % ( id( self, ), self.__mergeGroupId ) )

        return False

    def __setPlugValue( self, mergeGroup="" ) :

        with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode.staticTypeId() ), mergeGroup=mergeGroup ) :
            with Gaffer.BlockedConnection( self._plugConnections() ) :
                try :
                    self.getPlug().setValue( self.__numericWidget.getValue() )
                except :
                    pass

        # now any changes that were made in the numeric widget have been transferred
        # into the global undo queue, we remove the text editing changes from the
        # widget's private text editing undo queue. it will then ignore undo shortcuts,
        # allowing them to fall through to the global undo shortcut.
        self.__numericWidget.clearUndo()

        # we always need to update the ui from the plug after trying to set it,
        # because the plug might clamp the value to something else. furthermore
        # it might not even emit plugSetSignal if it happens to clamp to the same
        # value as it had before. we block calls to _updateFromPlug() while setting
        # the value to avoid having to do the work twice if plugSetSignal is emitted.
        self._updateFromPlug()

    def __updateWidth( self ) :
        # fix the character width for bounded int plugs so the widget is
        # sized to the largest possible value
        charWidth = None
        if isinstance( self.getPlug(), Gaffer.IntPlug ) and self.getPlug().hasMaxValue() :
            charWidth = len( str( self.getPlug().maxValue() ) )

        self.__numericWidget.setFixedCharacterWidth( charWidth )
# register this widget as the UI for both numeric plug types
GafferUI.PlugValueWidget.registerType( Gaffer.FloatPlug.staticTypeId(), NumericPlugValueWidget )
GafferUI.PlugValueWidget.registerType( Gaffer.IntPlug.staticTypeId(), NumericPlugValueWidget )
| {
"content_hash": "d7029eb5da864206120d48fd8e69339d",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 127,
"avg_line_length": 32.66129032258065,
"alnum_prop": 0.7086419753086419,
"repo_name": "davidsminor/gaffer",
"id": "a06a87a81dcd349af5a7fb7088b32c7b37193972",
"size": "5934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferUI/NumericPlugValueWidget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9286"
},
{
"name": "C++",
"bytes": "3358250"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Python",
"bytes": "3267354"
},
{
"name": "Shell",
"bytes": "7055"
},
{
"name": "Slash",
"bytes": "35200"
}
],
"symlink_target": ""
} |
import simplejson
qwerty = r'''
`~ 1! 2@ 3# 4$ 5% 6^ 7& 8* 9( 0) -_ =+
qQ wW eE rR tT yY uU iI oO pP [{ ]} \|
aA sS dD fF gG hH jJ kK lL ;: '"
zZ xX cC vV bB nN mM ,< .> /?
'''
dvorak = r'''
`~ 1! 2@ 3# 4$ 5% 6^ 7& 8* 9( 0) [{ ]}
'" ,< .> pP yY fF gG cC rR lL /? =+ \|
aA oO eE uU iI dD hH tT nN sS -_
;: qQ jJ kK xX bB mM wW vV zZ
'''
keypad = r'''
/ * -
7 8 9 +
4 5 6
1 2 3
0 .
'''
mac_keypad = r'''
= / *
7 8 9 -
4 5 6 +
1 2 3
0 .
'''
def get_slanted_adjacent_coords(x, y):
    '''
    returns the six adjacent coordinates on a standard keyboard, where each row is slanted to the
    right from the last. adjacencies are clockwise, starting with key to the left, then two keys
    above, then right key, then two keys below. (that is, only near-diagonal keys are adjacent,
    so g's coordinate is adjacent to those of t,y,b,v, but not those of r,u,n,c.)
    '''
    left = (x - 1, y)
    above = [(x, y - 1), (x + 1, y - 1)]
    right = (x + 1, y)
    below = [(x, y + 1), (x - 1, y + 1)]
    return [left] + above + [right] + below
def get_aligned_adjacent_coords(x, y):
    '''
    returns the nine clockwise adjacent coordinates on a keypad, where each row is vert aligned.
    '''
    offsets = [(-1, 0), (-1, -1), (0, -1), (1, -1),
               (1, 0), (1, 1), (0, 1), (-1, 1)]
    return [(x + dx, y + dy) for dx, dy in offsets]
def build_graph(layout_str, slanted):
    '''
    builds an adjacency graph as a dictionary: {character: [adjacent_characters]}.
    adjacent characters occur in a clockwise order.
    for example:
    * on qwerty layout, 'g' maps to ['fF', 'tT', 'yY', 'hH', 'bB', 'vV']
    * on keypad layout, '7' maps to [None, None, None, '=', '8', '5', '4', None]
    (note: this is Python 2 code -- see dict.iteritems below. the layout
    strings are whitespace-significant: key positions are derived from the
    column of each token.)
    '''
    position_table = {} # maps from tuple (x,y) -> characters at that position.
    tokens = layout_str.split()
    token_size = len(tokens[0])
    x_unit = token_size + 1 # x position unit len is token len plus 1 for the following whitespace.
    adjacency_func = get_slanted_adjacent_coords if slanted else get_aligned_adjacent_coords
    assert all(len(token) == token_size for token in tokens), 'token len mismatch:\n ' + layout_str
    for y, line in enumerate(layout_str.split('\n')):
        # the way I illustrated keys above, each qwerty row is indented one space in from the last
        slant = y - 1 if slanted else 0
        for token in line.split():
            x, remainder = divmod(line.index(token) - slant, x_unit)
            assert remainder == 0, 'unexpected x offset for %s in:\n%s' % (token, layout_str)
            position_table[(x,y)] = token
    adjacency_graph = {}
    for (x,y), chars in position_table.iteritems():
        for char in chars:
            adjacency_graph[char] = []
            for coord in adjacency_func(x, y):
                # position in the list indicates direction
                # (for qwerty, 0 is left, 1 is top, 2 is top right, ...)
                # for edge chars like 1 or m, insert None as a placeholder when needed
                # so that each character in the graph has a same-length adjacency list.
                adjacency_graph[char].append(position_table.get(coord, None))
    return adjacency_graph
if __name__ == '__main__':
    # emit a CoffeeScript module mapping each key to its clockwise
    # neighbors for the four supported keyboard layouts
    with open('adjacency_graphs.coffee', 'w') as f:
        f.write('# generated by scripts/build_keyboard_adjacency_graphs.py\n')
        f.write('adjacency_graphs = \n ')
        lines = []
        for graph_name, args in [('qwerty', (qwerty, True)),
                                 ('dvorak', (dvorak, True)),
                                 ('keypad', (keypad, False)),
                                 ('mac_keypad', (mac_keypad, False))]:
            graph = build_graph(*args)
            # sort_keys keeps regenerated output stable for diffs
            lines.append('%s: %s' % (graph_name, simplejson.dumps(graph, sort_keys=True)))
        f.write('\n '.join(lines))
        f.write('\n\n')
        f.write('module.exports = adjacency_graphs\n')
| {
"content_hash": "1534fe5d773c25c68a50f5c2fc9594ff",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 99,
"avg_line_length": 39.520833333333336,
"alnum_prop": 0.5632577754348972,
"repo_name": "shotishu/zxcvbn",
"id": "2b894088f47c75b71d7dd52a6a7ccde768916158",
"size": "3794",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "data-scripts/build_keyboard_adjacency_graphs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "762708"
},
{
"name": "HTML",
"bytes": "5218508"
},
{
"name": "JavaScript",
"bytes": "98228"
},
{
"name": "Python",
"bytes": "11061"
}
],
"symlink_target": ""
} |
from nose.tools import * # flake8: noqa
from website.models import NodeLog
from website.project.model import Auth
from website.util import permissions
from api.base.settings.defaults import API_BASE
from website.util import disconnected_from_listeners
from website.project.signals import contributor_removed
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
)
from tests.utils import assert_logs, assert_not_logs
class NodeCRUDTestCase(ApiTestCase):
    """Shared fixture for node CRUD tests.

    Creates two users (``user`` owns everything, ``user_two`` is a
    non-contributor), one public and one private project with identical
    title/description/category, and canned detail URLs for both projects
    plus a URL for a nonexistent node.
    """
    def setUp(self):
        super(NodeCRUDTestCase, self).setUp()
        self.user = AuthUserFactory()
        self.user_two = AuthUserFactory()
        self.title = 'Cool Project'
        self.new_title = 'Super Cool Project'
        self.description = 'A Properly Cool Project'
        self.new_description = 'An even cooler project'
        self.category = 'data'
        self.new_category = 'project'
        self.public_project = ProjectFactory(title=self.title,
                                             description=self.description,
                                             category=self.category,
                                             is_public=True,
                                             creator=self.user)
        self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
        self.private_project = ProjectFactory(title=self.title,
                                              description=self.description,
                                              category=self.category,
                                              is_public=False,
                                              creator=self.user)
        self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_project._id)
        # URL for a node id that does not exist (used for 404 tests).
        self.fake_url = '/{}nodes/{}/'.format(API_BASE, '12345')
    def make_node_payload(node, attributes):
        # Build a JSON API update payload for *node* with the given attributes.
        # NOTE(review): defined in the class body WITHOUT `self`, so the first
        # positional argument is bound to whatever is passed first.  Calling
        # ``self.make_node_payload(attrs)`` would bind ``self`` to ``node``;
        # call it as ``NodeCRUDTestCase.make_node_payload(node, attrs)`` —
        # confirm intended usage against callers.
        return {
            'data': {
                'id': node._id,
                'type': 'nodes',
                'attributes': attributes,
            }
        }
class TestContributorDetail(NodeCRUDTestCase):
    """GET behaviour of /nodes/{node_id}/contributors/{user_id}/."""
    def setUp(self):
        super(TestContributorDetail, self).setUp()
        # private_url_base keeps a literal '{}' so tests can fill in
        # arbitrary user ids later.
        self.public_url = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.public_project._id, self.user._id)
        self.private_url_base = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.private_project._id, '{}')
        self.private_url = self.private_url_base.format(self.user._id)
    def test_get_public_contributor_detail(self):
        res = self.app.get(self.public_url)
        assert_equal(res.status_code, 200)
        # Contributor resource ids have the form "<node_id>-<user_id>".
        assert_equal(res.json['data']['id'], '{}-{}'.format(self.public_project._id, self.user._id))
    # regression test
    def test_get_public_contributor_detail_is_viewable_through_browsable_api(self):
        res = self.app.get(self.public_url + '?format=api')
        assert_equal(res.status_code, 200)
    def test_get_private_node_contributor_detail_contributor_auth(self):
        res = self.app.get(self.private_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['id'], '{}-{}'.format(self.private_project._id, self.user._id))
    def test_get_private_node_contributor_detail_non_contributor(self):
        # Non-contributors are forbidden from private-node contributor detail.
        res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
    def test_get_private_node_contributor_detail_not_logged_in(self):
        res = self.app.get(self.private_url, expect_errors=True)
        assert_equal(res.status_code, 401)
    def test_get_private_node_non_contributor_detail_contributor_auth(self):
        # user_two is not a contributor on the private project -> 404.
        res = self.app.get(self.private_url_base.format(self.user_two._id), auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
    def test_get_private_node_invalid_user_detail_contributor_auth(self):
        res = self.app.get(self.private_url_base.format('invalid'), auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
    def test_unregistered_contributor_detail_show_up_as_name_associated_with_project(self):
        project = ProjectFactory(creator=self.user, is_public=True)
        project.add_unregistered_contributor('Robert Jackson', 'robert@gmail.com', auth=Auth(self.user), save=True)
        unregistered_contributor = project.contributors[1]
        url = '/{}nodes/{}/contributors/{}/'.format(API_BASE, project._id, unregistered_contributor._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['embeds']['users']['data']['attributes']['full_name'], 'Robert Jackson')
        assert_equal(res.json['data']['attributes'].get('unregistered_contributor'), 'Robert Jackson')
        # Same email added to a second project under a different name:
        project_two = ProjectFactory(creator=self.user, is_public=True)
        project_two.add_unregistered_contributor('Bob Jackson', 'robert@gmail.com', auth=Auth(self.user), save=True)
        url = '/{}nodes/{}/contributors/{}/'.format(API_BASE, project_two._id, unregistered_contributor._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 200)
        # full_name on the user record is unchanged...
        assert_equal(res.json['data']['embeds']['users']['data']['attributes']['full_name'], 'Robert Jackson')
        # ...but unregistered_contributor reflects the per-project name.
        assert_equal(res.json['data']['attributes'].get('unregistered_contributor'), 'Bob Jackson')
    def test_detail_includes_index(self):
        # 'index' exposes contributor ordering; the creator starts at 0.
        res = self.app.get(self.public_url)
        data = res.json['data']
        assert_in('index', data['attributes'].keys())
        assert_equal(data['attributes']['index'], 0)
        other_contributor = AuthUserFactory()
        self.public_project.add_contributor(other_contributor, auth=Auth(self.user), save=True)
        other_contributor_detail = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.public_project._id, other_contributor._id)
        res = self.app.get(other_contributor_detail)
        assert_equal(res.json['data']['attributes']['index'], 1)
class TestNodeContributorOrdering(ApiTestCase):
    """Reordering contributors by PATCHing the 'index' attribute on the
    contributor detail endpoint.

    The fixture is one creator (``user_one``, always position 0) plus nine
    additional read/write contributors.
    """
    def setUp(self):
        super(TestNodeContributorOrdering, self).setUp()
        self.contributors = [AuthUserFactory() for number in range(1, 10)]
        self.user_one = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user_one)
        for contributor in self.contributors:
            self.project.add_contributor(
                contributor,
                permissions=[permissions.READ, permissions.WRITE],
                visible=True,
                save=True
            )
        # The creator always occupies position 0.
        self.contributors.insert(0, self.user_one)
        self.base_contributor_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.project._id)
        self.url_creator = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user_one._id)
        self.contributor_urls = ['/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, contributor._id)
            for contributor in self.contributors]
        self.last_position = len(self.contributors) - 1
    @staticmethod
    def _get_contributor_user_id(contributor):
        """Extract the user id from an embedded contributor resource."""
        return contributor['embeds']['users']['data']['id']
    def _move_payload(self, contributor_user_id, index):
        """Build the JSON API payload that moves a contributor to *index*."""
        return {
            'data': {
                'id': '{}-{}'.format(self.project._id, contributor_user_id),
                'type': 'contributors',
                'attributes': {
                    'index': index
                }
            }
        }
    def _contributor_list(self):
        """GET the contributor list as the creator; assert 200, return data."""
        res = self.app.get(self.base_contributor_url, auth=self.user_one.auth)
        assert_equal(res.status_code, 200)
        return res.json['data']
    def test_initial_order(self):
        contributor_list = self._contributor_list()
        found_contributors = False
        for i in range(0, len(self.contributors)):
            assert_equal(self.contributors[i]._id, self._get_contributor_user_id(contributor_list[i]))
            assert_equal(i, contributor_list[i]['attributes']['index'])
            found_contributors = True
        assert_true(found_contributors, "Did not compare any contributors.")
    @assert_logs(NodeLog.CONTRIB_REORDERED, 'project')
    def test_move_top_contributor_down_one_and_also_log(self):
        contributor_to_move = self.contributors[0]._id
        former_second_contributor = self.contributors[1]
        url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
        res_patch = self.app.patch_json_api(url, self._move_payload(contributor_to_move, 1), auth=self.user_one.auth)
        assert_equal(res_patch.status_code, 200)
        self.project.reload()
        contributor_list = self._contributor_list()
        assert_equal(self._get_contributor_user_id(contributor_list[1]), contributor_to_move)
        assert_equal(self._get_contributor_user_id(contributor_list[0]), former_second_contributor._id)
    def test_move_second_contributor_up_one_to_top(self):
        contributor_to_move = self.contributors[1]._id
        former_first_contributor = self.contributors[0]
        url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
        res_patch = self.app.patch_json_api(url, self._move_payload(contributor_to_move, 0), auth=self.user_one.auth)
        assert_equal(res_patch.status_code, 200)
        self.project.reload()
        contributor_list = self._contributor_list()
        assert_equal(self._get_contributor_user_id(contributor_list[0]), contributor_to_move)
        assert_equal(self._get_contributor_user_id(contributor_list[1]), former_first_contributor._id)
    def test_move_top_contributor_down_to_bottom(self):
        contributor_to_move = self.contributors[0]._id
        former_second_contributor = self.contributors[1]
        url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
        res_patch = self.app.patch_json_api(url, self._move_payload(contributor_to_move, self.last_position),
                                            auth=self.user_one.auth)
        assert_equal(res_patch.status_code, 200)
        self.project.reload()
        contributor_list = self._contributor_list()
        assert_equal(self._get_contributor_user_id(contributor_list[self.last_position]), contributor_to_move)
        assert_equal(self._get_contributor_user_id(contributor_list[0]), former_second_contributor._id)
    def test_move_bottom_contributor_up_to_top(self):
        contributor_to_move = self.contributors[self.last_position]._id
        former_second_to_last_contributor = self.contributors[self.last_position - 1]
        url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
        res_patch = self.app.patch_json_api(url, self._move_payload(contributor_to_move, 0), auth=self.user_one.auth)
        assert_equal(res_patch.status_code, 200)
        self.project.reload()
        contributor_list = self._contributor_list()
        assert_equal(self._get_contributor_user_id(contributor_list[0]), contributor_to_move)
        assert_equal(
            self._get_contributor_user_id(contributor_list[self.last_position]),
            former_second_to_last_contributor._id
        )
    def test_move_second_to_last_contributor_down_past_bottom(self):
        # An index past the end is accepted and clamped to the last position.
        contributor_to_move = self.contributors[self.last_position - 1]._id
        former_last_contributor = self.contributors[self.last_position]
        url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
        res_patch = self.app.patch_json_api(url, self._move_payload(contributor_to_move, self.last_position + 10),
                                            auth=self.user_one.auth)
        assert_equal(res_patch.status_code, 200)
        self.project.reload()
        contributor_list = self._contributor_list()
        assert_equal(self._get_contributor_user_id(contributor_list[self.last_position]), contributor_to_move)
        assert_equal(
            self._get_contributor_user_id(contributor_list[self.last_position - 1]),
            former_last_contributor._id
        )
    def test_move_top_contributor_down_to_second_to_last_position_with_negative_numbers(self):
        # Negative indexes count from the end (here: second-to-last slot).
        contributor_to_move = self.contributors[0]._id
        former_second_contributor = self.contributors[1]
        url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
        res_patch = self.app.patch_json_api(url, self._move_payload(contributor_to_move, -1), auth=self.user_one.auth)
        assert_equal(res_patch.status_code, 200)
        self.project.reload()
        contributor_list = self._contributor_list()
        assert_equal(self._get_contributor_user_id(contributor_list[self.last_position - 1]), contributor_to_move)
        assert_equal(self._get_contributor_user_id(contributor_list[0]), former_second_contributor._id)
    def test_write_contributor_fails_to_move_top_contributor_down_one(self):
        # A write-only (non-admin) contributor may not reorder; order intact.
        contributor_to_move = self.contributors[0]._id
        former_second_contributor = self.contributors[1]
        url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
        res_patch = self.app.patch_json_api(url, self._move_payload(contributor_to_move, 1),
                                            auth=former_second_contributor.auth, expect_errors=True)
        assert_equal(res_patch.status_code, 403)
        self.project.reload()
        contributor_list = self._contributor_list()
        assert_equal(self._get_contributor_user_id(contributor_list[0]), contributor_to_move)
        assert_equal(self._get_contributor_user_id(contributor_list[1]), former_second_contributor._id)
    def test_non_authenticated_fails_to_move_top_contributor_down_one(self):
        # Anonymous requests are rejected with 401; order intact.
        contributor_to_move = self.contributors[0]._id
        former_second_contributor = self.contributors[1]
        url = '{}{}/'.format(self.base_contributor_url, contributor_to_move)
        res_patch = self.app.patch_json_api(url, self._move_payload(contributor_to_move, 1), expect_errors=True)
        assert_equal(res_patch.status_code, 401)
        self.project.reload()
        contributor_list = self._contributor_list()
        assert_equal(self._get_contributor_user_id(contributor_list[0]), contributor_to_move)
        assert_equal(self._get_contributor_user_id(contributor_list[1]), former_second_contributor._id)
class TestNodeContributorUpdate(ApiTestCase):
    """PUT/PATCH updates of a contributor's 'permission' and
    'bibliographic' (visibility) attributes."""
    def setUp(self):
        super(TestNodeContributorUpdate, self).setUp()
        self.user = AuthUserFactory()
        self.user_two = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)
        self.project.add_contributor(self.user_two, permissions=[permissions.READ, permissions.WRITE], visible=True, save=True)
        self.url_creator = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user._id)
        self.url_contributor = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user_two._id)
    def _contrib_id(self, user):
        # Contributor relationship ids have the form "<node_id>-<user_id>".
        return '{}-{}'.format(self.project._id, user._id)
    @staticmethod
    def _payload(attributes, contrib_id=None, contrib_type='contributors'):
        """Build a JSON API contributor payload.

        Pass ``None`` for *contrib_id* or *contrib_type* to omit that member
        entirely (used to exercise malformed requests).
        """
        data = {'attributes': attributes}
        if contrib_id is not None:
            data['id'] = contrib_id
        if contrib_type is not None:
            data['type'] = contrib_type
        return {'data': data}
    def test_node_update_invalid_data(self):
        # Non-object payloads are rejected outright.
        res = self.app.put_json_api(self.url_creator, "Incorrect data", auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
        res = self.app.put_json_api(self.url_creator, ["Incorrect data"], auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
    def test_change_contributor_no_id(self):
        data = self._payload({'permission': permissions.ADMIN, 'bibliographic': True})
        res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
    def test_change_contributor_correct_id(self):
        data = self._payload({'permission': permissions.ADMIN, 'bibliographic': True},
                             contrib_id=self._contrib_id(self.user_two))
        res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 200)
    def test_change_contributor_incorrect_id(self):
        # Payload id disagreeing with the URL is a 409 Conflict.
        data = self._payload({'permission': permissions.ADMIN, 'bibliographic': True}, contrib_id='12345')
        res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 409)
    def test_change_contributor_no_type(self):
        data = self._payload({'permission': permissions.ADMIN, 'bibliographic': True},
                             contrib_id=self._contrib_id(self.user_two), contrib_type=None)
        res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
    def test_change_contributor_incorrect_type(self):
        data = self._payload({'permission': permissions.ADMIN, 'bibliographic': True},
                             contrib_id=self.user_two._id, contrib_type='Wrong type.')
        res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 409)
    @assert_logs(NodeLog.PERMISSIONS_UPDATED, 'project', -3)
    @assert_logs(NodeLog.PERMISSIONS_UPDATED, 'project', -2)
    @assert_logs(NodeLog.PERMISSIONS_UPDATED, 'project')
    def test_change_contributor_permissions(self):
        contrib_id = self._contrib_id(self.user_two)
        # Walk the contributor ADMIN -> WRITE -> READ, checking the response
        # and the stored permissions after each step (three log entries).
        for permission, expected in [
                (permissions.ADMIN, [permissions.READ, permissions.WRITE, permissions.ADMIN]),
                (permissions.WRITE, [permissions.READ, permissions.WRITE]),
                (permissions.READ, [permissions.READ])]:
            data = self._payload({'permission': permission, 'bibliographic': True}, contrib_id=contrib_id)
            res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth)
            assert_equal(res.status_code, 200)
            assert_equal(res.json['data']['attributes']['permission'], permission)
            self.project.reload()
            assert_equal(self.project.get_permissions(self.user_two), expected)
    @assert_logs(NodeLog.MADE_CONTRIBUTOR_INVISIBLE, 'project', -2)
    @assert_logs(NodeLog.MADE_CONTRIBUTOR_VISIBLE, 'project')
    def test_change_contributor_bibliographic(self):
        contrib_id = self._contrib_id(self.user_two)
        # Toggle visibility off then back on; both changes must be logged.
        for bibliographic in (False, True):
            data = self._payload({'bibliographic': bibliographic}, contrib_id=contrib_id)
            res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth)
            assert_equal(res.status_code, 200)
            assert_equal(res.json['data']['attributes']['bibliographic'], bibliographic)
            self.project.reload()
            assert_equal(self.project.get_visible(self.user_two), bibliographic)
    @assert_logs(NodeLog.PERMISSIONS_UPDATED, 'project', -2)
    @assert_logs(NodeLog.MADE_CONTRIBUTOR_INVISIBLE, 'project')
    def test_change_contributor_permission_and_bibliographic(self):
        data = self._payload({'permission': permissions.READ, 'bibliographic': False},
                             contrib_id=self._contrib_id(self.user_two))
        res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        attributes = res.json['data']['attributes']
        assert_equal(attributes['permission'], permissions.READ)
        assert_equal(attributes['bibliographic'], False)
        self.project.reload()
        assert_equal(self.project.get_permissions(self.user_two), [permissions.READ])
        assert_false(self.project.get_visible(self.user_two))
    @assert_not_logs(NodeLog.PERMISSIONS_UPDATED, 'project')
    def test_not_change_contributor(self):
        # permission=None leaves permissions untouched -> no log entry.
        data = self._payload({'permission': None, 'bibliographic': True},
                             contrib_id=self._contrib_id(self.user_two))
        res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        attributes = res.json['data']['attributes']
        assert_equal(attributes['permission'], permissions.WRITE)
        assert_equal(attributes['bibliographic'], True)
        self.project.reload()
        assert_equal(self.project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE])
        assert_true(self.project.get_visible(self.user_two))
    def test_invalid_change_inputs_contributor(self):
        data = self._payload({'permission': 'invalid', 'bibliographic': 'invalid'},
                             contrib_id=self._contrib_id(self.user_two))
        res = self.app.put_json_api(self.url_contributor, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(self.project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE])
        assert_true(self.project.get_visible(self.user_two))
    @assert_logs(NodeLog.PERMISSIONS_UPDATED, 'project')
    def test_change_admin_self_with_other_admin(self):
        # With a second admin present, an admin may demote themselves.
        self.project.add_permission(self.user_two, permissions.ADMIN, save=True)
        data = self._payload({'permission': permissions.WRITE, 'bibliographic': True},
                             contrib_id=self._contrib_id(self.user))
        res = self.app.put_json_api(self.url_creator, data, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['attributes']['permission'], permissions.WRITE)
        self.project.reload()
        assert_equal(self.project.get_permissions(self.user), [permissions.READ, permissions.WRITE])
    def test_change_admin_self_without_other_admin(self):
        # The only admin may not demote themselves.
        data = self._payload({'permission': permissions.WRITE, 'bibliographic': True},
                             contrib_id=self._contrib_id(self.user))
        res = self.app.put_json_api(self.url_creator, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        self.project.reload()
        assert_equal(self.project.get_permissions(self.user), [permissions.READ, permissions.WRITE, permissions.ADMIN])
    def test_remove_all_bibliographic_statuses_contributors(self):
        # At least one bibliographic (visible) contributor must remain.
        self.project.set_visible(self.user_two, False, save=True)
        data = self._payload({'bibliographic': False}, contrib_id=self._contrib_id(self.user))
        res = self.app.put_json_api(self.url_creator, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        self.project.reload()
        assert_true(self.project.get_visible(self.user))
    def test_change_contributor_non_admin_auth(self):
        data = self._payload({'permission': permissions.READ, 'bibliographic': False},
                             contrib_id=self.user_two._id)
        res = self.app.put_json_api(self.url_contributor, data, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        self.project.reload()
        assert_equal(self.project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE])
        assert_true(self.project.get_visible(self.user_two))
    def test_change_contributor_not_logged_in(self):
        data = self._payload({'permission': permissions.READ, 'bibliographic': False},
                             contrib_id=self.user_two._id)
        res = self.app.put_json_api(self.url_contributor, data, expect_errors=True)
        assert_equal(res.status_code, 401)
        self.project.reload()
        assert_equal(self.project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE])
        assert_true(self.project.get_visible(self.user_two))
class TestNodeContributorPartialUpdate(ApiTestCase):
    """PATCH semantics: attributes omitted from the payload stay unchanged."""
    def setUp(self):
        super(TestNodeContributorPartialUpdate, self).setUp()
        self.user = AuthUserFactory()
        self.user_two = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)
        self.project.add_contributor(self.user_two, permissions=[permissions.READ, permissions.WRITE], visible=True, save=True)
        self.url_creator = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user._id)
        self.url_contributor = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user_two._id)
    def test_patch_bibliographic_only(self):
        # Only 'bibliographic' is sent; permissions must be untouched.
        creator_id = '{}-{}'.format(self.project._id, self.user._id)
        data = {
            'data': {
                'id': creator_id,
                'type': 'contributors',
                'attributes': {
                    'bibliographic': False,
                }
            }
        }
        res = self.app.patch_json_api(self.url_creator, data, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        self.project.reload()
        assert_equal(self.project.get_permissions(self.user), [permissions.READ, permissions.WRITE, permissions.ADMIN])
        assert_false(self.project.get_visible(self.user))
    def test_patch_permission_only(self):
        # Only 'permission' is sent; visibility must be untouched.
        user_three = AuthUserFactory()
        self.project.add_contributor(user_three, permissions=[permissions.READ, permissions.WRITE], visible=False, save=True)
        url_contributor = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, user_three._id)
        contributor_id = '{}-{}'.format(self.project._id, user_three._id)
        data = {
            'data': {
                'id': contributor_id,
                'type': 'contributors',
                'attributes': {
                    'permission': permissions.READ,
                }
            }
        }
        res = self.app.patch_json_api(url_contributor, data, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        self.project.reload()
        assert_equal(self.project.get_permissions(user_three), [permissions.READ])
        assert_false(self.project.get_visible(user_three))
class TestNodeContributorDelete(ApiTestCase):
    """DELETE on the contributor detail endpoint: who may remove whom, and
    the admin/bibliographic invariants that block removal."""
    def setUp(self):
        super(TestNodeContributorDelete, self).setUp()
        self.user = AuthUserFactory()
        self.user_two = AuthUserFactory()
        self.user_three = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)
        self.project.add_contributor(self.user_two, permissions=[permissions.READ, permissions.WRITE], visible=True, save=True)
        self.url_user = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user._id)
        self.url_user_two = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user_two._id)
        # user_three is NOT a contributor yet; some tests add them first.
        self.url_user_three = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, self.user_three._id)
    @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
    def test_remove_contributor_admin(self):
        # Disconnect contributor_removed so that we don't check in files
        # We can remove this when StoredFileNode is implemented in osf-models
        with disconnected_from_listeners(contributor_removed):
            res = self.app.delete(self.url_user_two, auth=self.user.auth)
        assert_equal(res.status_code, 204)
        self.project.reload()
        assert_not_in(self.user_two, self.project.contributors)
    def test_remove_contributor_non_admin_is_forbidden(self):
        self.project.add_contributor(self.user_three, permissions=[permissions.READ, permissions.WRITE], visible=True, save=True)
        # A write-only contributor may not remove another contributor.
        res = self.app.delete(self.url_user_three, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        self.project.reload()
        assert_in(self.user_three, self.project.contributors)
    @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
    def test_remove_self_non_admin(self):
        # A non-admin may still remove *themselves*.
        self.project.add_contributor(self.user_three, permissions=[permissions.READ, permissions.WRITE], visible=True, save=True)
        # Disconnect contributor_removed so that we don't check in files
        # We can remove this when StoredFileNode is implemented in osf-models
        with disconnected_from_listeners(contributor_removed):
            res = self.app.delete(self.url_user_three, auth=self.user_three.auth)
        assert_equal(res.status_code, 204)
        self.project.reload()
        assert_not_in(self.user_three, self.project.contributors)
    def test_remove_contributor_non_contributor(self):
        res = self.app.delete(self.url_user_two, auth=self.user_three.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        self.project.reload()
        assert_in(self.user_two, self.project.contributors)
    def test_remove_contributor_not_logged_in(self):
        res = self.app.delete(self.url_user_two, expect_errors=True)
        assert_equal(res.status_code, 401)
        self.project.reload()
        assert_in(self.user_two, self.project.contributors)
    def test_remove_non_contributor_admin(self):
        # Deleting a user who is not a contributor is a 404, not a no-op 204.
        assert_not_in(self.user_three, self.project.contributors)
        res = self.app.delete(self.url_user_three, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
        self.project.reload()
        assert_not_in(self.user_three, self.project.contributors)
    def test_remove_non_existing_user_admin(self):
        url_user_fake = '/{}nodes/{}/contributors/{}/'.format(API_BASE, self.project._id, 'fake')
        # Disconnect contributor_removed so that we don't check in files
        # We can remove this when StoredFileNode is implemented in osf-models
        with disconnected_from_listeners(contributor_removed):
            res = self.app.delete(url_user_fake, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
    @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
    def test_remove_self_contributor_not_unique_admin(self):
        # With a second admin present, the creator may remove themselves.
        self.project.add_permission(self.user_two, permissions.ADMIN, save=True)
        # Disconnect contributor_removed so that we don't check in files
        # We can remove this when StoredFileNode is implemented in osf-models
        with disconnected_from_listeners(contributor_removed):
            res = self.app.delete(self.url_user, auth=self.user.auth)
        assert_equal(res.status_code, 204)
        self.project.reload()
        assert_not_in(self.user, self.project.contributors)
    @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
    def test_can_remove_self_as_contributor_not_unique_admin(self):
        self.project.add_permission(self.user_two, permissions.ADMIN, save=True)
        # Disconnect contributor_removed so that we don't check in files
        # We can remove this when StoredFileNode is implemented in osf-models
        with disconnected_from_listeners(contributor_removed):
            res = self.app.delete(self.url_user_two, auth=self.user_two.auth)
        assert_equal(res.status_code, 204)
        self.project.reload()
        assert_not_in(self.user_two, self.project.contributors)
    def test_remove_self_contributor_unique_admin(self):
        # The only admin cannot remove themselves.
        # Disconnect contributor_removed so that we don't check in files
        # We can remove this when StoredFileNode is implemented in osf-models
        with disconnected_from_listeners(contributor_removed):
            res = self.app.delete(self.url_user, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        self.project.reload()
        assert_in(self.user, self.project.contributors)
    def test_can_not_remove_only_bibliographic_contributor(self):
        # The last visible (bibliographic) contributor cannot be removed.
        self.project.add_permission(self.user_two, permissions.ADMIN, save=True)
        self.project.set_visible(self.user_two, False, save=True)
        res = self.app.delete(self.url_user, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        self.project.reload()
        assert_in(self.user, self.project.contributors)
| {
"content_hash": "1b75a48a0d18e23156ec2a16b8b1f5c7",
"timestamp": "",
"source": "github",
"line_count": 880,
"max_line_length": 130,
"avg_line_length": 44.26704545454545,
"alnum_prop": 0.5964574509048902,
"repo_name": "hmoco/osf.io",
"id": "866d371a19ac79698268c46bfb1b5ec5b33fb1fd",
"size": "38979",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api_tests/nodes/views/test_node_contributors_detail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "175175"
},
{
"name": "HTML",
"bytes": "193496"
},
{
"name": "JavaScript",
"bytes": "1690469"
},
{
"name": "Mako",
"bytes": "672179"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7856328"
}
],
"symlink_target": ""
} |
import os
import tempfile
import zipfile
import django_rq
from api.geovite_importer import import_records
from api.models import GeoVITeImportData
def extract_xml_paths(zip_file, directory):
    """Extract every ``.xml`` member of *zip_file* into *directory*.

    :param zip_file: an open :class:`zipfile.ZipFile`
    :param directory: destination directory for the extracted files
    :return: set of directories (under *directory*) that contain at least one
        extracted XML file
    """
    paths = set()
    for file_name in zip_file.namelist():
        if file_name.endswith('.xml'):
            # Record the directory this member lands in, then extract it.
            paths.add(os.path.dirname(os.path.join(directory, file_name)))
            zip_file.extract(file_name, path=directory)
    return paths
def import_zipped_xml(instance_id):
    """Import all XML records from the zip attached to a GeoVITeImportData row.

    Does nothing for instances already flagged as imported; otherwise extracts
    the archive into a temporary directory, imports each directory that
    contains XML files, and marks the instance as imported.

    :param instance_id: primary key of the GeoVITeImportData instance
    """
    geovite_importer_instance = GeoVITeImportData.objects.get(id=instance_id)
    if geovite_importer_instance.is_imported:
        return
    xml_zip_path = geovite_importer_instance.xml_zip.path
    with tempfile.TemporaryDirectory() as temp_dir:
        # Close the archive deterministically instead of leaking the handle.
        with zipfile.ZipFile(xml_zip_path) as geovite_zip:
            for path in extract_xml_paths(geovite_zip, temp_dir):
                import_records(path)
    geovite_importer_instance.is_imported = True
    geovite_importer_instance.save()
def delayed_xml_zip_import(sender, instance, **_):
    # Django signal receiver: queue the (potentially slow) zip import on RQ
    # instead of running it inline in the request/transaction.
    django_rq.enqueue(import_zipped_xml, instance_id=instance.id)
| {
"content_hash": "ff57edde60430e17e740285cf853fd5d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 33.42424242424242,
"alnum_prop": 0.6944696282864914,
"repo_name": "geometalab/G4SE-Compass",
"id": "a4fa1b75adcd3b49de8ba22c6e8afd57cbf4d786",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "compass-api/G4SE/api/jobs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "905"
},
{
"name": "JavaScript",
"bytes": "30091"
},
{
"name": "Makefile",
"bytes": "1813"
},
{
"name": "Nginx",
"bytes": "96"
},
{
"name": "Python",
"bytes": "127269"
},
{
"name": "Shell",
"bytes": "4081"
},
{
"name": "Vue",
"bytes": "20180"
}
],
"symlink_target": ""
} |
"""
Requirements:
- requests (installation: pip install requests)
- lxml (installation: pip install lxml)
"""
import requests
import lxml.html
import os
def download_file(file_name, url):
    """Stream *url* to disk at *file_name* and return *file_name*.

    Uses stream=True so large documents are written in 1 KiB chunks
    instead of being buffered entirely in memory.
    """
    # NOTE the stream=True parameter
    r = requests.get(url, stream=True)
    try:
        with open(file_name, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                # filter out keep-alive chunks, which are empty
                if chunk:
                    f.write(chunk)
    finally:
        # Release the HTTP connection even if writing fails.
        r.close()
    return file_name
# Walk all 1380 result pages of the 2015 written-questions (QECR) register.
for i in range(1, 1381):
    # BUG FIX: the query string previously read '\xa4tPage' — the '&curren'
    # prefix of '&currentPage' had been mis-decoded as the HTML entity for
    # the currency sign. The parameter is 'currentPage'.
    url = "http://www.europarl.europa.eu/RegistreWeb/search/typedoc.htm?codeTypeDocu=QECR&year=2015&currentPage={0}".format(i)
    html = lxml.html.parse(url)
    # Comprehension variables renamed from 'i': in Python 2 a list
    # comprehension leaks its loop variable and would clobber the page counter.
    titles = [t.strip() for t in html.xpath("//div[contains(@class, 'notice')]/p[@class='title']/a/text()")]
    docs = [d.strip() for d in html.xpath("//div[contains(@class, 'notice')]/ul/li/a/@href")]
    q_refs = [q.strip() for q in html.xpath("//div[contains(@class, 'notice')]/div[@class='date_reference']/span[2]/text()")]
    for title, doc, q_ref in zip(titles, docs, q_refs):
        # "<title with '/'→'-'> <question ref>.<original extension>" under ./data
        file_name = os.path.join(os.getcwd(), 'data', '-'.join(title.split('/')) + ' ' + q_ref + '.' + doc.split('.')[-1])
        downloaded_file = download_file(file_name, doc)
print downloaded_file | {
"content_hash": "6f6c1b04631dceb06b99969152896b64",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 126,
"avg_line_length": 40.25806451612903,
"alnum_prop": 0.6225961538461539,
"repo_name": "fossasia/parliament-scaper",
"id": "161b3c5779379e8bc53e35ff1f2462dfd95e82cd",
"size": "1270",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "eu-scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8629"
},
{
"name": "Ruby",
"bytes": "1641"
},
{
"name": "Scala",
"bytes": "2597"
}
],
"symlink_target": ""
} |
import pytest
from requests import ConnectionError
from datadog_checks.riak import Riak
from . import common
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_check(aggregator, check, instance):
    """Run the check twice and verify all expected metrics and service checks."""
    # Two runs, so per-run gauges are asserted with count=2 below.
    for _ in range(2):
        check.check(instance)
    metric_tags = ['my_tag']
    service_check_tags = metric_tags + ['url:' + instance['url']]
    for metric in common.CHECK_GAUGES + common.CHECK_GAUGES_STATS:
        aggregator.assert_metric(metric, tags=metric_tags, count=2)
    aggregator.assert_service_check(common.SERVICE_CHECK_NAME, status=Riak.OK, tags=service_check_tags)
    # These gauges are only reported once regardless of run count.
    for metric in common.GAUGE_OTHER:
        aggregator.assert_metric(metric, count=1)
    aggregator.all_metrics_asserted()
@pytest.mark.unit
def test_bad_config(aggregator, instance):
    """An unreachable URL must raise ConnectionError and emit a CRITICAL service check."""
    instance.update({"url": "http://localhost:5985"})
    riak_check = Riak('riak', {}, [instance])
    with pytest.raises(ConnectionError):
        riak_check.check(instance)
    expected_tags = ['my_tag', 'url:http://localhost:5985']
    aggregator.assert_service_check(common.SERVICE_CHECK_NAME, status=Riak.CRITICAL, tags=expected_tags)
| {
"content_hash": "4967988428dd22e364ff768b71cb65c3",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 98,
"avg_line_length": 29.16216216216216,
"alnum_prop": 0.7052826691380908,
"repo_name": "DataDog/integrations-core",
"id": "f59e46e473911721661bb03863aa910b9b187609",
"size": "1189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "riak/tests/test_riak.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import, with_statement
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
from future.moves.urllib.parse import quote
import os
import re
import threading
import logging
from xml.etree.ElementTree import parse
import io
from uuid import uuid4
import time
from datetime import datetime, timedelta
from flexget.entry import Entry
from flexget.config_schema import register_config_key
from flexget.event import event
from flexget.manager import manager
from flexget.config_schema import one_or_more
from flexget.utils import requests
from flexget.utils.tools import get_config_hash
try:
from irc_bot.irc_bot import IRCBot, partial
from irc_bot import irc_bot
except ImportError as e:
irc_bot = None
IRCBot = object
log = logging.getLogger('irc')
# Strips mIRC formatting control codes (reset/underline/bold and colour
# sequences like \x03<fg>[,<bg>]) from incoming messages.
MESSAGE_CLEAN = re.compile("\x0f|\x1f|\x02|\x03(?:[\d]{1,2}(?:,[\d]{1,2})?)?", re.MULTILINE | re.UNICODE)
# Best-effort matcher for http(s) URLs embedded in announcement lines.
URL_MATCHER = re.compile(r'(https?://[\da-z\.-]+\.[a-z\.]{2,6}[/\w\.-\?&]*/?)', re.MULTILINE | re.UNICODE)
# JSON-schema fragment for a single IRC channel name.
channel_pattern = {
    'type': 'string', 'pattern': '^([#&][^\x07\x2C\s]{0,200})',
    'error_pattern': 'channel name must start with # or & and contain no commas and whitespace'
}
# Config schema: a mapping of connection name -> connection options, or
# `False` to disable the plugin entirely.
schema = {
    'oneOf': [
        {
            'type': 'object',
            'additionalProperties': {
                'type': 'object',
                'properties': {
                    'tracker_file': {'type': 'string'},
                    'server': {'type': 'string'},
                    'port': {'type': 'integer'},
                    'nickname': {'type': 'string'},
                    'channels': one_or_more(channel_pattern),
                    'nickserv_password': {'type': 'string'},
                    'invite_nickname': {'type': 'string'},
                    'invite_message': {'type': 'string'},
                    'task': one_or_more({
                        'type': 'string'
                    }),
                    'task_re': {
                        'type': 'object',
                        'additionalProperties': one_or_more({
                            'type': 'object',
                            'properties': {
                                'regexp': {'type': 'string'},
                                'field': {'type': 'string'}
                            },
                            'required': ['regexp', 'field'],
                            'additionalProperties': False
                        })
                    },
                    'queue_size': {'type': 'integer', 'default': 1},
                    'use_ssl': {'type': 'boolean', 'default': False},
                    'task_delay': {'type': 'integer'},
                },
                'anyOf': [
                    {'required': ['server', 'channels']},
                    {'required': ['tracker_file']}
                ],
                'error_anyOf': 'Must specify a tracker file or server and channel(s)',
                'oneOf': [
                    {'required': ['task']},
                    {'required': ['task_re']}
                ],
                'error_oneOf': 'Must specify a task',
                'required': ['port'],
                'additionalProperties': {'type': 'string'},
            }
        },
        {'type': 'boolean', 'enum': [False]},
    ]
}
# Global that holds all the IRCConnection instances, keyed by connection name
irc_connections = {}
# The manager object and thread
irc_manager = None
# Per-connection config hashes, used to skip restarting unchanged connections
# whenever the config updated event is fired (which is apparently a lot)
config_hash = {}
def create_thread(name, conn):
    """
    Creates a new daemon thread targeting ``conn.start`` (the thread is NOT
    started here — callers start it).

    :param name: Name for the new thread
    :param conn: IRCConnection or IRCConnectionManager object
    :return: Thread
    """
    thread = threading.Thread(target=conn.start, name=name)
    # Thread.setDaemon() is deprecated; assign the attribute directly.
    thread.daemon = True
    return thread
def irc_prefix(var):
    """
    Prefix a string with 'irc_' (lower-cased); non-strings yield None.

    :param var: Variable to prefix
    :return: Prefixed variable, or None when var is not a string
    """
    if not isinstance(var, basestring):
        return None
    return 'irc_%s' % var.lower()
def strip_whitespace(value):
    """
    Remove leading and trailing whitespace from strings. Return value if not a string.

    :param value: any value; only strings are stripped
    :return: stripped string, or the original value unchanged
    """
    return value.strip() if isinstance(value, basestring) else value
class TrackerFileParseError(Exception):
    """Raised when a .tracker XML file cannot be parsed"""
class TrackerFileError(Exception):
    """Raised for general tracker-file problems (lookup, download or filesystem failures)"""
class MissingConfigOption(Exception):
    """Exception thrown when a config option specified in the tracker file is not on the irc config"""
class IRCConnection(IRCBot):
    def __init__(self, config, config_name):
        """Build a connection from the flexget config, optionally merged with a
        .tracker file, and prepare — but do not start — its worker thread.

        :param config: dict with this connection's flexget config
        :param config_name: key of this connection in the irc config
        :raises MissingConfigOption: when the tracker file requires a setting
            absent from the flexget config
        """
        self.config = config
        self.connection_name = config_name
        self.tracker_config = None
        self.server_list = []
        self.announcer_list = []
        self.ignore_lines = []
        self.message_regex = []
        # If we have a tracker config file, load it
        tracker_config_file = config.get('tracker_file')
        if tracker_config_file:
            self.tracker_config = self.retrieve_tracker_config(tracker_config_file)
        channel_list = []
        if self.tracker_config is not None:
            # Validate config with the settings in the torrent file
            for param in self.tracker_config.find('settings'):
                # Handle textbox entries
                if param.tag == 'textbox':
                    value_name = param.get('name')
                else:
                    value_name = param.tag
                # Strip the gazelle prefix
                if value_name.startswith('gazelle_'):
                    value_name = value_name.replace('gazelle_', '')
                # Skip descriptions
                if 'description' in value_name:
                    continue
                if self.config.get(value_name) is None:
                    raise MissingConfigOption('missing configuration option on irc config %s: %s' %
                                              (self.connection_name, value_name))
            # Get the tracker name, for use in the connection name
            self.connection_name = self.tracker_config.get('longName', config_name)
            # Extract the IRC server information
            for server in self.tracker_config.find('servers'):
                self.server_list.extend(server.get('serverNames').split(','))
                channel_list.extend(server.get('channelNames').split(','))
                self.announcer_list.extend(server.get('announcerNames').split(','))
            # Process ignore lines
            for regex_values in self.tracker_config.findall('parseinfo/ignore/regex'):
                rx = re.compile(regex_values.get('value'), re.UNICODE | re.MULTILINE)
                self.ignore_lines.append((rx, regex_values.get('expected') != 'false'))
            # Parse patterns
            self.multilinepatterns = self.parse_patterns(list(
                self.tracker_config.findall('parseinfo/multilinepatterns/extract')))
            self.linepatterns = self.parse_patterns(list(
                self.tracker_config.findall('parseinfo/linepatterns/extract')))
        # overwrite tracker config with flexget config
        if self.config.get('server'):
            self.server_list = [self.config['server']]
            log.debug('Using server specified from config')
        channels = config.get('channels')
        if channels:
            channel_list = channels if isinstance(channels, list) else [channels]
            log.debug('Using channel(s) specified from config')
        log.debug('Servers: %s', self.server_list)
        log.debug('Channels: %s', channel_list)
        log.debug('Announcers: %s', self.announcer_list)
        log.debug('Ignore Lines: %d', len(self.ignore_lines))
        # NOTE(review): multilinepatterns/linepatterns are only assigned when a
        # tracker config is present — presumably IRCBot or another path defines
        # them otherwise; verify before relying on this debug line without one.
        log.debug('Message Regexs: %d', len(self.multilinepatterns) + len(self.linepatterns))
        for rx, vals, optional in self.multilinepatterns:
            msg = ' Multilinepattern "%s" extracts %s'
            if optional:
                msg += ' (optional)'
            log.debug(msg, rx.pattern, vals)
        for rx, vals, optional in self.linepatterns:
            msg = ' Linepattern "%s" extracts %s'
            if optional:
                msg += ' (optional)'
            log.debug(msg, rx.pattern, vals)
        # Init the IRC Bot
        ircbot_config = {'servers': self.server_list, 'port': config['port'], 'channels': channel_list,
                         'nickname': config.get('nickname', 'Flexget-%s' % str(uuid4())),
                         'invite_nickname': config.get('invite_nickname'),
                         'invite_message': config.get('invite_message'),
                         'nickserv_password': config.get('nickserv_password'),
                         'use_ssl': config.get('use_ssl')}
        IRCBot.__init__(self, ircbot_config)
        self.inject_before_shutdown = False
        self.entry_queue = []
        self.line_cache = {}
        self.processing_message = False  # if set to True, it means there's a message processing queued
        self.thread = create_thread(self.connection_name, self)
@classmethod
def read_tracker_config(cls, path):
"""
Attempts to open and parse the .tracker file specified in path
:param path: path to .tracker file
:return: the parsed XML
"""
try:
tracker_config = parse(path).getroot()
except Exception as e:
raise TrackerFileParseError('Unable to parse tracker config file %s: %s' % (path, e))
else:
return tracker_config
@classmethod
def retrieve_tracker_config(cls, tracker_config_file):
"""
Will attempt to retrieve the .tracker file from disk or github. Returns the parsed XML.
:param tracker_config_file: URL or path to .tracker file
:return: parsed XML
"""
base_url = 'https://raw.githubusercontent.com/autodl-community/autodl-trackers/master/'
tracker_config_file = os.path.expanduser(tracker_config_file)
# First we attempt to find the file locally as-is
if os.path.exists(tracker_config_file):
log.debug('Found tracker file: %s', tracker_config_file)
return cls.read_tracker_config(tracker_config_file)
if not tracker_config_file.endswith('.tracker'):
tracker_config_file += '.tracker'
# Maybe the file is missing extension?
if os.path.exists(tracker_config_file):
log.debug('Found tracker file: %s', tracker_config_file)
return cls.read_tracker_config(tracker_config_file.rsplit('.tracker')[0])
# Check that containing dir exists, otherwise default to flexget_config_dir/trackers
if os.path.exists(os.path.dirname(tracker_config_file)):
base_dir = os.path.dirname(tracker_config_file)
else:
base_dir = os.path.abspath(os.path.join(manager.config_base, 'trackers'))
# Find the filenames for easy use later
tracker_name = os.path.basename(tracker_config_file)
tracker_name_no_ext = os.path.splitext(tracker_name)[0]
# One last try with case insensitive search!
if os.path.exists(base_dir):
files = os.listdir(base_dir)
for f in files:
if tracker_name_no_ext.lower() in f.lower():
found_path = os.path.join(base_dir, f)
log.debug('Found tracker file: %s', found_path)
return cls.read_tracker_config(found_path)
# Download from Github instead
if not os.path.exists(base_dir): # will only try to create the default `trackers` dir
try:
os.mkdir(base_dir)
except IOError as e:
raise TrackerFileError(e)
log.info('Tracker file not found on disk. Attempting to fetch tracker config file from Github.')
tracker = None
try:
tracker = requests.get(base_url + tracker_config_file)
except (requests.RequestException, IOError):
pass
if not tracker:
try:
log.debug('Trying to search list of tracker files on Github')
# Try to see if it's not found due to case sensitivity
trackers = requests.get('https://api.github.com/repos/autodl-community/'
'autodl-trackers/git/trees/master?recursive=1').json().get('tree', [])
for t in trackers:
name = t.get('path', '')
if not name.endswith('.tracker') or name.lower() != tracker_name.lower():
continue
tracker = requests.get(base_url + name)
tracker_name = name
break
except (requests.RequestException, IOError) as e:
raise TrackerFileError(e)
if not tracker:
raise TrackerFileError('Unable to find %s on disk or Github. Did you spell it correctly?' %
tracker_config_file)
# If we got this far, let's save our work :)
save_path = os.path.join(base_dir, tracker_name)
with io.open(save_path, 'wb') as tracker_file:
for chunk in tracker.iter_content(8192):
tracker_file.write(chunk)
return cls.read_tracker_config(save_path)
    def is_alive(self):
        # Truthy while this connection's worker thread exists and is running.
        return self.thread and self.thread.is_alive()
def parse_patterns(self, patterns):
"""
Parses the patterns and creates a tuple with the compiled regex pattern and the variables it produces
:param patterns: list of regex patterns as .tracker XML
:return: list of (regex, variables, optional)-pairs
"""
result = []
for pattern in patterns:
rx = re.compile(pattern.find('regex').get('value'), re.UNICODE | re.MULTILINE)
vals = [var.get('name') for idx, var in enumerate(pattern.find('vars'))]
optional = True if pattern.get('optional', 'false').lower() == 'true' else False
result.append((rx, vals, optional))
return result
def quit(self):
"""
Quit the IRC bot
:return:
"""
if self.inject_before_shutdown and self.entry_queue:
self.run_tasks()
IRCBot.quit(self)
def run_tasks(self):
"""
Passes entries to the target task(s) configured for this connection
:return:
"""
tasks = self.config.get('task')
tasks_re = self.config.get('task_re')
if tasks:
if isinstance(tasks, basestring):
tasks = [tasks]
log.debug('Injecting %d entries into tasks %s', len(self.entry_queue), ', '.join(tasks))
manager.execute(options={'tasks': tasks, 'cron': True, 'inject': self.entry_queue, 'allow_manual': True},
priority=5)
if tasks_re:
tasks_entry_map = {}
for entry in self.entry_queue:
matched = False
for task, config in tasks_re.items():
if isinstance(config, dict):
config = [config]
for c in config:
if re.search(c['regexp'], entry.get(c['field'], ''), re.IGNORECASE):
matched = True
if not tasks_entry_map.get(task):
tasks_entry_map[task] = []
tasks_entry_map[task].append(entry)
if not matched:
log.debug('Entry "%s" did not match any task regexp.', entry['title'])
for task, entries in tasks_entry_map.items():
log.debug('Injecting %d entries into task "%s"', len(entries), task)
manager.execute(options={'tasks': [task], 'cron': True, 'inject': entries, 'allow_manual': True},
priority=5)
self.entry_queue = []
def queue_entry(self, entry):
"""
Stores an entry in the connection entry queue, if the queue is over the size limit then submit them
:param entry: Entry to be queued
:return:
"""
self.entry_queue.append(entry)
log.debug('Entry: %s', entry)
if len(self.entry_queue) >= self.config['queue_size']:
if self.config.get('task_delay'):
self.schedule.queue_command(self.config['task_delay'], self.run_tasks, unique=False)
else:
self.run_tasks()
    def match_message_patterns(self, patterns, msg):
        """
        Tries to match the message to the list of patterns. Supports multiline messages.
        Only the first matching pattern contributes variables.

        :param patterns: list of (regex, variables, optional)-tuples
        :param msg: The parsed IRC message
        :return: A dict of the variables and their extracted values
        """
        result = {}
        for rx, vals, _ in patterns:
            log.debug('Using pattern %s to parse message vars', rx.pattern)
            match = rx.search(msg)
            if match:
                # Variable names become irc_-prefixed entry fields; unmatched
                # (None) groups are normalized to empty strings.
                val_names = [irc_prefix(val.lower()) for val in vals]
                val_values = [strip_whitespace(x) or '' for x in match.groups()]
                result.update(dict(zip(val_names, val_values)))
                log.debug('Found: %s', dict(zip(val_names, val_values)))
                break
            else:
                log.debug('No matches found for %s in %s', rx.pattern, msg)
        return result
    def process_tracker_config_rules(self, entry, rules=None):
        """
        Processes an Entry object with the linematched rules defined in a tracker config file

        :param entry: Entry (or dict of irc_ fields) to be updated
        :param rules: Ruleset to use; defaults to the tracker file's
            parseinfo/linematched element
        :return: dict of the resulting irc_-prefixed fields
        """
        ignore_optionals = []
        if rules is None:
            rules = self.tracker_config.find('parseinfo/linematched')
        # Make sure all irc fields from entry are in `fields`
        fields = {key: val for key, val in entry.items() if key.startswith('irc_')}
        for rule in rules:
            log.debug('Processing rule %s' % rule.tag)
            # Var - concat a var from other vars
            if rule.tag == 'var':
                result = ''
                for element in rule:
                    if element.tag == 'string':
                        result += element.get('value')
                    elif element.tag in ['var', 'varenc']:
                        varname = element.get('name')
                        if irc_prefix(varname) in fields:
                            value = fields[irc_prefix(varname)]
                        elif self.config.get(varname):
                            value = self.config.get(varname)
                        else:
                            log.error('Missing variable %s from config, skipping rule', irc_prefix(varname))
                            break
                        if element.tag == 'varenc':
                            # varenc values are URL-encoded before concatenation
                            value = quote(value.encode('utf-8'))
                        result += value
                    else:
                        log.error('Unsupported var operation %s, skipping rule', element.tag)
                        break
                else:
                    # Only set the result if we processed all elements
                    log.debug('Result for rule %s: %s=%s', rule.tag, rule.get('name'), result)
                    fields[irc_prefix(rule.get('name'))] = result
            # Var Replace - replace text in a var
            elif rule.tag == 'varreplace':
                source_var = irc_prefix(rule.get('srcvar'))
                target_var = irc_prefix(rule.get('name'))
                regex = rule.get('regex')
                replace = rule.get('replace')
                if source_var and target_var and regex is not None and replace is not None and source_var in fields:
                    fields[target_var] = re.sub(regex, replace, fields[source_var])
                    log.debug('varreplace: %s=%s', target_var, fields[target_var])
                else:
                    log.error('Invalid varreplace options, skipping rule')
            # Extract - create multiple vars from a single regex
            elif rule.tag == 'extract':
                source_var = irc_prefix(rule.get('srcvar'))
                if source_var not in fields:
                    if rule.get('optional', 'false') == 'false':
                        log.error('Error processing extract rule, non-optional value %s missing!', source_var)
                    # Remember missing vars so extracttags can skip them later.
                    ignore_optionals.append(source_var)
                    continue
                if rule.find('regex') is not None:
                    regex = rule.find('regex').get('value')
                else:
                    log.error('Regex option missing on extract rule, skipping rule')
                    continue
                group_names = [irc_prefix(x.get('name')) for x in rule.find('vars') if x.tag == 'var']
                match = re.search(regex, fields[source_var])
                if match:
                    fields.update(dict(zip(group_names, match.groups())))
                else:
                    log.debug('No match found for rule extract')
            # Extract Tag - set a var if a regex matches a tag in a var
            elif rule.tag == 'extracttags':
                source_var = irc_prefix(rule.get('srcvar'))
                split = rule.get('split')
                if source_var in ignore_optionals:
                    continue
                # NOTE(review): assumes source_var is present in fields here —
                # a KeyError would propagate otherwise; verify tracker files.
                values = [strip_whitespace(x) for x in fields[source_var].split(split)]
                for element in rule:
                    if element.tag == 'setvarif':
                        target_var = irc_prefix(element.get('varName'))
                        regex = element.get('regex')
                        value = element.get('value')
                        new_value = element.get('newValue')
                        if regex is not None:
                            found_match = False
                            for val in values:
                                match = re.match(regex, val)
                                if match:
                                    fields[target_var] = val
                                    found_match = True
                            if not found_match:
                                log.debug('No matches found for regex %s', regex)
                        elif value is not None and new_value is not None:
                            if value in values:
                                fields[target_var] = new_value
                            else:
                                log.debug('No match found for value %s in %s', value, source_var)
                        else:
                            log.error('Missing regex/value/newValue for setvarif command, ignoring')
            # Extract One - extract one var from a list of regexes
            elif rule.tag == 'extractone':
                for element in rule:
                    if element.tag == 'extract':
                        source_var = irc_prefix(element.get('srcvar'))
                        if element.find('regex') is not None:
                            regex = element.find('regex').get('value')
                        else:
                            log.error('Regex option missing on extract rule, skipping.')
                            continue
                        if element.find('vars') is not None:
                            vars = [irc_prefix(var.get('name')) for var in element.find('vars')]
                        else:
                            log.error('No variable bindings found in extract rule, skipping.')
                            continue
                        match = re.match(regex, fields.get(source_var, ''))
                        if match:
                            fields.update(dict(zip(vars, match.groups())))
                        else:
                            log.debug('No match for extract with regex: %s', regex)
                    else:
                        log.error('Unsupported extractone tag: %s', element.tag)
            # Set Regex - set a var if a regex matches
            elif rule.tag == 'setregex':
                source_var = irc_prefix(rule.get('srcvar'))
                regex = rule.get('regex')
                target_var = irc_prefix(rule.get('varName'))
                target_val = rule.get('newValue')
                if source_var and regex and target_var and target_val:
                    if source_var in fields and re.search(regex, fields[source_var]):
                        fields[target_var] = target_val
                else:
                    log.error('Option missing on setregex, skipping rule')
            # If statement
            elif rule.tag == 'if':
                source_var = irc_prefix(rule.get('srcvar'))
                regex = rule.get('regex')
                if source_var and regex:
                    if source_var in fields and re.match(regex, fields[source_var]):
                        # Recurse into the nested ruleset with the current fields.
                        fields.update(self.process_tracker_config_rules(fields, rule))
                else:
                    log.error('Option missing for if statement, skipping rule')
            else:
                log.warning('Unsupported linematched tag: %s', rule.tag)
        return fields
def on_privmsg(self, msg):
"""
Appends messages for the specific channel in the line cache. Schedules a message processing after 1s to
handle multiline announcements.
:param msg: IRCMessage object
:return:
"""
nickname = msg.from_nick
channel = msg.arguments[0]
if not irc_bot.is_channel(channel):
log.debug('Received msg is not a channel msg: %s', msg)
return
# set some defaults
self.line_cache.setdefault(channel, {})
self.line_cache[channel].setdefault(nickname, [])
self.line_cache[channel][nickname].append(msg.arguments[1])
if not self.processing_message:
# Schedule a parse of the message in 1 second (for multilines)
self.schedule.queue_command(1, partial(self.process_message, nickname, channel))
self.processing_message = True
    def process_message(self, nickname, channel):
        """
        Pops lines from the line cache and passes them to be parsed

        :param str nickname: Nickname of who sent the message
        :param str channel: Channel where the message originated from
        :return: None
        """
        # If we have announcers defined, ignore any messages not from them
        if self.announcer_list and nickname not in self.announcer_list:
            log.debug('Ignoring message: from non-announcer %s', nickname)
            return
        # Clean up the messages
        lines = [MESSAGE_CLEAN.sub('', line) for line in self.line_cache[channel][nickname]]
        log.debug('Received line(s): %s', u'\n'.join(lines))
        # Generate some entries
        if self.linepatterns:
            entries = self.entries_from_linepatterns(lines)
        elif self.multilinepatterns:
            # Multiline parsing may leave unconsumed lines for the next round.
            entries, lines = self.entries_from_multilinepatterns(lines)
        else:
            entries = self.entries_from_lines(lines)
        for entry in entries:
            # Process the generated entry through the linematched rules
            if self.tracker_config is not None and entry:
                entry.update(self.process_tracker_config_rules(entry))
            elif self.tracker_config is not None:
                log.error('Failed to parse message(s).')
                return
            entry['title'] = entry.get('irc_torrentname')
            entry['url'] = entry.get('irc_torrenturl')
            log.debug('Entry after processing: %s', dict(entry))
            if not entry['url'] or not entry['title']:
                log.error('Parsing message failed. Title=%s, url=%s.', entry['title'], entry['url'])
                continue
            log.verbose('IRC message in %s generated an entry: %s', channel, entry)
            self.queue_entry(entry)
        # reset the line cache, keeping multiline leftovers for the next batch
        if self.multilinepatterns and lines:
            self.line_cache[channel][nickname] = lines
            log.debug('Left over lines: %s', '\n'.join(lines))
        else:
            self.line_cache[channel][nickname] = []
        self.processing_message = False
def entries_from_linepatterns(self, lines):
"""
:param lines: list of lines from irc
:return list: list of entries generated from lines
"""
entries = []
for line in lines:
# If it's listed in ignore lines, skip it
ignore = False
for rx, expected in self.ignore_lines:
if rx.match(line) and expected:
log.debug('Ignoring message: matched ignore line')
ignore = True
break
if ignore:
continue
entry = Entry(irc_raw_message=line)
match = self.match_message_patterns(self.linepatterns, line)
# Generate the entry and process it through the linematched rules
if not match:
log.error('Failed to parse message. Skipping: %s', line)
continue
entry.update(match)
entries.append(entry)
return entries
def entries_from_multilinepatterns(self, lines):
"""
:param lines: list of lines
:return list: list of entries generated from lines
"""
entries = []
rest = [] # contains the rest of the lines
while len(lines) > 0:
entry = Entry()
raw_message = ''
matched_lines = []
for idx, (rx, vals, optional) in enumerate(self.multilinepatterns):
log.debug('Using pattern %s to parse message vars', rx.pattern)
# find the next candidate line
line = ''
for l in list(lines):
# skip ignored lines
for ignore_rx, expected in self.ignore_lines:
if ignore_rx.match(l) and expected:
log.debug('Ignoring message: matched ignore line')
lines.remove(l)
break
else:
line = l
break
raw_message += '\n' + line
match = self.match_message_patterns([(rx, vals, optional)], line)
if match:
entry.update(match)
matched_lines.append(line)
lines.remove(line)
elif optional:
log.debug('No match for optional extract pattern found.')
elif not line:
rest = matched_lines + lines
break
elif idx == 0: # if it's the first regex that fails, then it's probably just garbage
log.error('No matches found for pattern %s', rx.pattern)
lines.remove(line)
rest = lines
break
else:
log.error('No matches found for pattern %s', rx.pattern)
rest = lines
break
else:
entry['irc_raw_message'] = raw_message
entries.append(entry)
continue
return entries, rest
def entries_from_lines(self, lines):
"""
:param lines: list of lines
:return list: list of entries generated from lines
"""
entries = []
for line in lines:
entry = Entry(irc_raw_message=line)
# Use the message as title
entry['title'] = line
# find a url...
url_match = URL_MATCHER.findall(line)
if url_match:
# We have a URL(s)!, generate an entry
urls = list(url_match)
url = urls[-1]
entry.update({
'urls': urls,
'url': url,
})
if not entry.get('url'):
log.error('Parsing message failed. No url found.')
continue
entries.append(entry)
return entries
    def is_connected(self):
        # Reflects the underlying IRCBot connection state.
        return self.connected
def stop(self, wait):
if self.is_connected() and wait:
self.inject_before_shutdown = True
self.quit()
class IRCConnectionManager(object):
    def __init__(self, config):
        # Full irc config: one entry per connection name.
        self.config = config
        # Set by stop() to ask the monitor loop in start() to exit.
        self.shutdown_event = threading.Event()
        # Whether stop_connections should wait to inject queued entries.
        self.wait = False
        # Extra reconnect back-off (seconds) applied when a connection is throttled.
        self.delay = 30
        self.thread = create_thread('irc_manager', self)
        self.thread.start()
    def is_alive(self):
        # Truthy while the manager's monitor thread is running.
        return self.thread and self.thread.is_alive()
    def start(self):
        """
        Checks for dead threads and attempts to restart them. If the connection appears to be throttled, it won't
        attempt to reconnect for 30s.

        :return:
        """
        global irc_connections
        self.start_connections()
        schedule = {}  # used to keep track of reconnection schedules
        while not self.shutdown_event.is_set():
            for conn_name, conn in irc_connections.items():
                # Don't want to revive if connection was closed cleanly
                if not conn.running:
                    continue
                now = datetime.now()
                # Attempt to revive the thread if it has died. conn.running will be True if it died unexpectedly.
                if not conn and self.config.get(conn_name):
                    try:
                        self.restart_connection(conn_name, self.config[conn_name])
                    except IOError as e:
                        log.error(e)
                elif not conn.is_alive() and conn.running:
                    # First time we notice the death: schedule a restart 5s out,
                    # plus the throttle back-off when applicable.
                    if conn_name not in schedule:
                        schedule[conn_name] = now + timedelta(seconds=5)
                        # add extra time if throttled
                        if conn.throttled:
                            schedule[conn_name] += timedelta(seconds=self.delay)
                    # is it time yet?
                    if schedule[conn_name] <= now:
                        log.error('IRC connection for %s has died unexpectedly. Restarting it.', conn_name)
                        try:
                            self.restart_connection(conn_name, conn.config)
                        except IOError as e:
                            log.error(e)
                        # remove it from the schedule
                        del schedule[conn_name]
            time.sleep(1)
        # Shutdown was requested: stop everything and clear the registry.
        self.stop_connections(self.wait)
        irc_connections = {}
def restart_connections(self, name=None):
if name:
self.restart_connection(name)
else:
for name, connection in irc_connections.items():
self.restart_connection(name, connection.config)
def restart_connection(self, name, config=None):
if not config:
config = irc_connections[name].config
if irc_connections[name].is_alive():
self.stop_connection(name)
irc_connections[name] = IRCConnection(config, name)
irc_connections[name].thread.start()
    def start_connections(self):
        """
        Start all the irc connections. Stop the daemon if there are failures.

        :return:
        """
        # First we validate the config for all connections including their .tracker files
        for conn_name, config in self.config.items():
            try:
                log.info('Starting IRC connection for %s', conn_name)
                conn = IRCConnection(config, conn_name)
                irc_connections[conn_name] = conn
                # NOTE(review): assumes config_hash['names'] was initialized by
                # the config-update handler before this runs — verify.
                config_hash['names'][conn_name] = get_config_hash(config)
            except (MissingConfigOption, TrackerFileParseError, TrackerFileError, IOError) as e:
                log.error(e)
                if conn_name in irc_connections:
                    del irc_connections[conn_name]  # remove it from the list of connections
        # Now we can start
        for conn_name, connection in irc_connections.items():
            connection.thread.start()
def stop_connections(self, wait, name=None):
    """Stop one named connection, or every connection when name is falsy."""
    targets = [name] if name else list(irc_connections.keys())
    for conn_name in targets:
        self.stop_connection(conn_name, wait)
def stop_connection(self, name, wait=False):
    """Stop the named connection's bot (if running) and join its thread."""
    conn = irc_connections[name]
    if conn.is_alive():
        conn.stop(wait)
        # Bounded join so a wedged socket cannot hang shutdown forever.
        conn.thread.join(11)
def stop(self, wait):
    """Signal the manager thread to shut down.

    Record whether the run loop should wait for work to finish BEFORE
    setting the event, since the loop reads self.wait once woken.
    """
    self.wait = wait
    self.shutdown_event.set()
def status(self, name=None):
    """Return a list of status dicts, for one connection or for all.

    :raises ValueError: if a name is given that is not a known connection.
    """
    if not name:
        return [self.status_dict(conn_name) for conn_name in irc_connections.keys()]
    if name not in irc_connections:
        raise ValueError('%s is not a valid irc connection' % name)
    return [self.status_dict(name)]
def status_dict(self, name):
    """Build a one-entry mapping describing the named connection's state."""
    conn = irc_connections[name]
    return {
        name: {
            'alive': conn.is_alive(),
            'channels': [{key: value} for key, value in conn.channels.items()],
            'connected_channels': conn.connected_channels,
            'server': conn.servers[0],
            'port': conn.port,
        }
    }
def update_config(self, config):
    """Apply a new irc config: build changed connections, drop removed ones,
    then swap the new connections in and restart them.

    Aborts without touching running connections if any new config fails
    validation.
    """
    pending = {}
    removed_connections = set(self.config.keys()) - set(config.keys())
    for conn_name, conn_conf in config.items():
        digest = get_config_hash(conn_conf)
        if conn_name in self.config and config_hash['names'].get(conn_name) == digest:
            # Unchanged connection; leave it running as-is.
            continue
        try:
            pending[conn_name] = IRCConnection(conn_conf, conn_name)
            config_hash['names'][conn_name] = digest
        except (MissingConfigOption, TrackerFileParseError, TrackerFileError, IOError) as e:
            log.error('Failed to update config. Error when updating %s: %s', conn_name, e)
            return
    # Stop connections that were removed from the config entirely.
    for conn_name in removed_connections:
        self.stop_connection(conn_name)
        del irc_connections[conn_name]
    # (Re)start the new/changed connections.
    for conn_name, conn in pending.items():
        if conn_name in irc_connections:
            self.stop_connection(conn_name)
        irc_connections[conn_name] = conn
        conn.thread.start()
    self.config = config
@event('manager.daemon.started')
def irc_start(manager):
    """Bring up IRC connections on daemon start (same path as a config reload)."""
    irc_update_config(manager)
@event('manager.config_updated')
def irc_update_config(manager):
    """(Re)configure IRC connections whenever the daemon config changes.

    Skips work entirely when the 'irc' section hash is unchanged; tears
    everything down when the section is removed or irc_bot is missing.
    """
    global irc_manager, config_hash
    # Exit if we're not running daemon mode
    if not manager.is_daemon:
        return
    config = manager.config.get('irc')
    # No config, no connections
    if not config:
        log.debug('No irc connections defined in the config')
        stop_irc(manager)
        return
    if irc_bot is None:
        log.error('ImportError: irc_bot module not found. Shutting down daemon.')
        stop_irc(manager)
        manager.shutdown(finish_queue=False)
        return
    config_hash.setdefault('names', {})
    new_config_hash = get_config_hash(config)
    if config_hash.get('config') == new_config_hash:
        log.verbose('IRC config has not been changed. Not reloading any connections.')
        return
    # BUG FIX: the new hash was previously stored under config_hash['manager']
    # while the check above reads config_hash.get('config'), so the
    # "unchanged" short-circuit could never match and every config update
    # reloaded all connections. Store under the same key that is read.
    config_hash['config'] = new_config_hash
    if irc_manager is not None and irc_manager.is_alive():
        irc_manager.update_config(config)
    else:
        irc_manager = IRCConnectionManager(config)
@event('manager.shutdown_requested')
def shutdown_requested(manager):
    """On a requested shutdown, stop IRC and wait for threads to finish."""
    stop_irc(manager, wait=True)
@event('manager.shutdown')
def stop_irc(manager, wait=False):
    """Shut down the IRC manager thread and, when safe, join it.

    :param wait: passed through to the manager; whether to finish queued work.
    """
    if irc_manager is not None and irc_manager.is_alive():
        log.info('Shutting down IRC.')
        irc_manager.stop(wait)
        # this check is necessary for when the irc manager is the one shutting down the daemon
        # a thread can't join itself
        if not threading.current_thread() == irc_manager.thread:
            # It's important to give the threads time to shut down to avoid socket issues later (eg. quick restart)
            irc_manager.thread.join(len(irc_connections.keys()) * 11)
@event('config.register')
def register_plugin():
    """Register the 'irc' root config key with its validation schema."""
    register_config_key('irc', schema)
| {
"content_hash": "64fc910231b175175646d4c4da6f5171",
"timestamp": "",
"source": "github",
"line_count": 1020,
"max_line_length": 117,
"avg_line_length": 40.40294117647059,
"alnum_prop": 0.5399529251898765,
"repo_name": "qk4l/Flexget",
"id": "23f4a7fc052ec2f094771fe10031c94b6367d824",
"size": "41211",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/plugins/daemon/irc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "79376"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3322934"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
} |
from netforce.model import get_model
from netforce import migration
from netforce.database import get_connection
class Migration(migration.Migration):
    """Backfill cost_price and cost_amount on stock moves that lack a cost amount."""
    _name = "stock.stock_cost_amount"
    _version = "3.1.0"

    def migrate(self):
        """For every move with a null cost_amount, derive it from unit price * qty."""
        moves = get_model("stock.move").search_browse([["cost_amount", "=", None]])
        for move in moves:
            unit_cost = move.unit_price or 0
            move.write({
                "cost_price": unit_cost,
                "cost_amount": (move.qty or 0) * unit_cost,
            })


Migration.register()
| {
"content_hash": "f94e0423e9527b2b000fa5e2aa9472bf",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 86,
"avg_line_length": 34.4,
"alnum_prop": 0.6724806201550387,
"repo_name": "sidzan/netforce",
"id": "64d7b5128da93bb04831318a04bcaa50436c30d0",
"size": "516",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "netforce_stock/netforce_stock/migrations/stock_cost_amount.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "248732"
},
{
"name": "HTML",
"bytes": "543196"
},
{
"name": "Java",
"bytes": "11870"
},
{
"name": "JavaScript",
"bytes": "3659528"
},
{
"name": "Makefile",
"bytes": "353"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3263548"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
} |
'''
Module for generic helpers
'''
from itertools import product
def pin_version(package_name, version_steps, exact=False):
    '''
    Create a tuple of requirement strings for a single package.

    :param package_name: package name to be pinned
    :param version_steps: an iterable of versions for the given package name
    :param exact: same as in `generate_matrix`
    '''
    if exact:
        return tuple('{0}=={1}'.format(package_name, v) for v in version_steps)
    steps = list(version_steps)
    # Each consecutive (lower, upper) pair becomes a half-open range pin;
    # the final version only serves as an upper bound.
    return tuple(
        '{0}>={1},<{2}'.format(package_name, lower, upper)
        for lower, upper in zip(steps, steps[1:])
    )
def generate_matrix(python_versions, packages, version_steps, pin_exact=False):
    '''
    Build a dependency matrix crossing python versions with package pins.

    :param python_versions: iterable of python version strings e.g. ('2.7', '3.3')
    :param packages: iterable of package names e.g. ('Django', 'Fabric')
    :param version_steps:
        iterable of iterables of version strings, one inner iterable per
        package in `packages`, e.g. (('1.5', '1.6', '1.7'), ('1.6', '1.7'))
    :param pin_exact:
        if True, pin exact versions (``pkg==X``); if False, pin half-open
        ranges between consecutive versions (``pkg>=X,<Y``).
    :return list:
        a tuple of ('pythonX.Y', combos) pairs where combos is the cross
        product of each package's pin options, shared by all pythons.
    '''
    pinned_per_package = [
        pin_version(pkg, steps, pin_exact)
        for pkg, steps in zip(packages, version_steps)
    ]
    # The cross product is identical for every python version, so compute
    # it once and reuse the tuple.
    deps_for_py = tuple(product(*pinned_per_package))
    return tuple(
        ('python{0}'.format(pyver), deps_for_py)
        for pyver in python_versions
    )
| {
"content_hash": "bdcd86d7e4e1d54c3caed513c0972b58",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 86,
"avg_line_length": 30.178947368421053,
"alnum_prop": 0.5538890826648064,
"repo_name": "slafs/tox-matrix",
"id": "0c79852324b58fd477337119c84b0203b92b5a38",
"size": "2889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toxmatrix/lib.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "22505"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
import re
import numbers
from collections import namedtuple
from .shapes import *
# Maps each supported Caffe layer type name to the function that computes its
# output shape (the shape_* callables come from the .shapes star import).
# shape_not_implemented raises, which compute_output_shape converts to KaffeError.
LAYER_DESCRIPTORS = {

    # Caffe Types
    'AbsVal': shape_identity,
    'Accuracy': shape_scalar,
    'ArgMax': shape_not_implemented,
    'BatchNorm': shape_identity,
    'BNLL': shape_not_implemented,
    'Concat': shape_concat,
    'ContrastiveLoss': shape_scalar,
    'Convolution': shape_convolution,
    'Deconvolution': shape_not_implemented,
    'Data': shape_data,
    'Dropout': shape_identity,
    'DummyData': shape_data,
    'EuclideanLoss': shape_scalar,
    'Eltwise': shape_identity,
    'Exp': shape_identity,
    'Flatten': shape_not_implemented,
    'HDF5Data': shape_data,
    'HDF5Output': shape_identity,
    'HingeLoss': shape_scalar,
    'Im2col': shape_not_implemented,
    'ImageData': shape_data,
    'InfogainLoss': shape_scalar,
    'InnerProduct': shape_inner_product,
    'Input': shape_data,
    'LRN': shape_identity,
    'MemoryData': shape_mem_data,
    'MultinomialLogisticLoss': shape_scalar,
    'MVN': shape_not_implemented,
    'Pooling': shape_pool,
    'Power': shape_identity,
    'ReLU': shape_identity,
    'Scale': shape_identity,
    'Sigmoid': shape_identity,
    'SigmoidCrossEntropyLoss': shape_scalar,
    'Silence': shape_not_implemented,
    'Softmax': shape_identity,
    'SoftmaxWithLoss': shape_scalar,
    'Split': shape_not_implemented,
    'Slice': shape_not_implemented,
    'TanH': shape_identity,
    'WindowData': shape_not_implemented,
    'Threshold': shape_identity,
}

# All recognized layer-type names (a dict view over LAYER_DESCRIPTORS).
LAYER_TYPES = LAYER_DESCRIPTORS.keys()

# Enum-like class: one attribute per layer type, each equal to its own name.
LayerType = type('LayerType', (), {t: t for t in LAYER_TYPES})
class NodeKind(LayerType):
    """Layer-type enum plus helpers for mapping kinds and computing shapes."""

    @staticmethod
    def map_raw_kind(kind):
        """Return the kind unchanged when it is a known layer type, else None."""
        return kind if kind in LAYER_TYPES else None

    @staticmethod
    def compute_output_shape(node):
        """Dispatch to the node kind's shape function; wrap unimplemented kinds."""
        try:
            return LAYER_DESCRIPTORS[node.kind](node)
        except NotImplementedError:
            raise KaffeError('Output shape computation not implemented for type: %s' % node.kind)
class NodeDispatchError(KaffeError):
    """Raised when no handler method matches a given node kind."""
    pass
class NodeDispatch(object):
    """Mixin that routes a layer kind to a handler method by naming convention."""

    @staticmethod
    def get_handler_name(node_kind):
        """Convert a CamelCase kind name to its under_scored handler suffix."""
        if len(node_kind) <= 4:
            # A catch-all for things like ReLU and tanh
            return node_kind.lower()
        # Two-pass CamelCase -> under_score conversion (handles acronym runs).
        underscored = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', node_kind)
        underscored = re.sub('([a-z0-9])([A-Z])', r'\1_\2', underscored)
        return underscored.lower()

    def get_handler(self, node_kind, prefix):
        """Return the bound method named '<prefix>_<handler_name>' for the kind.

        :raises NodeDispatchError: when no such method exists on self.
        """
        handler_name = '_'.join((prefix, self.get_handler_name(node_kind)))
        try:
            return getattr(self, handler_name)
        except AttributeError:
            raise NodeDispatchError('No handler found for node kind: %s (expected: %s)' %
                                    (node_kind, handler_name))
class LayerAdapter(object):
    """Wraps a raw Caffe layer message and exposes its typed parameters."""

    def __init__(self, layer, kind):
        self.layer = layer  # the raw Caffe layer protobuf message
        self.kind = kind    # one of the LAYER_TYPES / NodeKind names

    @property
    def parameters(self):
        """Return the kind-specific parameter message (e.g. convolution_param).

        :raises NodeDispatchError: if the layer message has no such field.
        """
        name = NodeDispatch.get_handler_name(self.kind)
        name = '_'.join((name, 'param'))
        try:
            return getattr(self.layer, name)
        except AttributeError:
            raise NodeDispatchError('Caffe parameters not found for layer kind: %s' % (self.kind))

    @staticmethod
    def get_kernel_value(scalar, repeated, idx, default=None):
        """Resolve one kernel geometry value from Caffe's alternative encodings.

        Caffe can express kernel/stride/pad either as dimension-specific
        scalars (kernel_h / kernel_w) or as a repeated field shared by all
        spatial dimensions.

        :param scalar: dimension-specific value; used when non-zero.
        :param repeated: repeated field (or plain number) shared by all dims.
        :param idx: spatial dimension index (0 = height, 1 = width).
        :param default: fallback when neither form carries a value.
        :raises ValueError: if no value can be determined and no default given.
        """
        if scalar:
            return scalar
        if repeated:
            if isinstance(repeated, numbers.Number):
                return repeated
            if len(repeated) == 1:
                # Same value applies to all spatial dimensions
                return int(repeated[0])
            assert idx < len(repeated)
            # Extract the value for the given spatial dimension
            return repeated[idx]
        if default is None:
            raise ValueError('Unable to determine kernel parameter!')
        return default

    @property
    def kernel_parameters(self):
        """Kernel, stride and padding sizes for Convolution/Pooling layers."""
        assert self.kind in (NodeKind.Convolution, NodeKind.Pooling)
        params = self.parameters
        k_h = self.get_kernel_value(params.kernel_h, params.kernel_size, 0)
        k_w = self.get_kernel_value(params.kernel_w, params.kernel_size, 1)
        s_h = self.get_kernel_value(params.stride_h, params.stride, 0, default=1)
        s_w = self.get_kernel_value(params.stride_w, params.stride, 1, default=1)
        p_h = self.get_kernel_value(params.pad_h, params.pad, 0, default=0)
        # BUG FIX: width padding previously read params.pad_h (copy-paste
        # error); it must read params.pad_w so asymmetric padding is honored.
        p_w = self.get_kernel_value(params.pad_w, params.pad, 1, default=0)
        return KernelParameters(k_h, k_w, s_h, s_w, p_h, p_w)
# Lightweight record of per-layer kernel geometry (kernel size, stride and
# padding per spatial dimension), produced by LayerAdapter.kernel_parameters.
KernelParameters = namedtuple('KernelParameters', ['kernel_h', 'kernel_w', 'stride_h', 'stride_w',
                                                   'pad_h', 'pad_w'])
| {
"content_hash": "9ac38ed00b49a4ea82a409965f05ac5b",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 98,
"avg_line_length": 32.53061224489796,
"alnum_prop": 0.6116687578419071,
"repo_name": "vishaalprasad/AnimeRecommendation",
"id": "c3c595588512cf82511fc3139a76a3bf4e2f511c",
"size": "4782",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "illustration2vec/kaffe/layers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "456664"
},
{
"name": "Python",
"bytes": "74514"
}
],
"symlink_target": ""
} |
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.compute.v2_1 import flavors as schema
from tempest.lib.api_schema.response.compute.v2_1 import flavors_access \
as schema_access
from tempest.lib.api_schema.response.compute.v2_1 import flavors_extra_specs \
as schema_extra_specs
from tempest.lib.common import rest_client
class FlavorsClient(rest_client.RestClient):
    """Compute API client for flavors: CRUD, extra specs, and tenant access."""

    def list_flavors(self, detail=False, **params):
        """List flavors, optionally with full details and query filters."""
        url = 'flavors'
        _schema = schema.list_flavors
        if detail:
            url += '/detail'
            _schema = schema.list_flavors_details
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(_schema, resp, body)
        return rest_client.ResponseBody(resp, body)

    def show_flavor(self, flavor_id):
        """Get details of the given flavor."""
        resp, body = self.get("flavors/%s" % flavor_id)
        body = json.loads(body)
        self.validate_response(schema.create_get_flavor_details, resp, body)
        return rest_client.ResponseBody(resp, body)

    def create_flavor(self, **kwargs):
        """Create a new flavor or instance type.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#create-flavors
        """
        # Translate friendly kwarg names to their wire-format keys.
        # BUG FIX: membership tests replace the previous truthiness checks
        # (`kwargs.get(...)`), which silently skipped the translation for
        # falsy values such as ephemeral=0 or is_public=False.
        if 'ephemeral' in kwargs:
            kwargs['OS-FLV-EXT-DATA:ephemeral'] = kwargs.pop('ephemeral')
        if 'is_public' in kwargs:
            kwargs['os-flavor-access:is_public'] = kwargs.pop('is_public')
        post_body = json.dumps({'flavor': kwargs})
        resp, body = self.post('flavors', post_body)
        body = json.loads(body)
        self.validate_response(schema.create_get_flavor_details, resp, body)
        return rest_client.ResponseBody(resp, body)

    def delete_flavor(self, flavor_id):
        """Delete the given flavor."""
        resp, body = self.delete("flavors/{0}".format(flavor_id))
        self.validate_response(schema.delete_flavor, resp, body)
        return rest_client.ResponseBody(resp, body)

    def is_resource_deleted(self, id):
        # Did not use show_flavor(id) for verification as it gives
        # 200 ok even for deleted id. LP #981263
        # we can remove the loop here and use get by ID when bug gets sortedout
        flavors = self.list_flavors(detail=True)['flavors']
        for flavor in flavors:
            if flavor['id'] == id:
                return False
        return True

    @property
    def resource_type(self):
        """Return the primary type of resource this client works with."""
        return 'flavor'

    def set_flavor_extra_spec(self, flavor_id, **kwargs):
        """Set extra Specs to the mentioned flavor.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#updateFlavorExtraSpec
        """
        post_body = json.dumps({'extra_specs': kwargs})
        resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
                               post_body)
        body = json.loads(body)
        self.validate_response(schema_extra_specs.set_get_flavor_extra_specs,
                               resp, body)
        return rest_client.ResponseBody(resp, body)

    def list_flavor_extra_specs(self, flavor_id):
        """Get extra Specs details of the mentioned flavor."""
        resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id)
        body = json.loads(body)
        self.validate_response(schema_extra_specs.set_get_flavor_extra_specs,
                               resp, body)
        return rest_client.ResponseBody(resp, body)

    def show_flavor_extra_spec(self, flavor_id, key):
        """Get extra Specs key-value of the mentioned flavor and key."""
        resp, body = self.get('flavors/%s/os-extra_specs/%s' % (flavor_id,
                              key))
        body = json.loads(body)
        self.validate_response(
            schema_extra_specs.set_get_flavor_extra_specs_key,
            resp, body)
        return rest_client.ResponseBody(resp, body)

    def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
        """Update specified extra Specs of the mentioned flavor and key.

        Available params: see http://developer.openstack.org/
                              api-ref-compute-v2.1.html#updateflavorspec
        """
        resp, body = self.put('flavors/%s/os-extra_specs/%s' %
                              (flavor_id, key), json.dumps(kwargs))
        body = json.loads(body)
        self.validate_response(
            schema_extra_specs.set_get_flavor_extra_specs_key,
            resp, body)
        return rest_client.ResponseBody(resp, body)

    def unset_flavor_extra_spec(self, flavor_id, key):
        """Unset extra Specs from the mentioned flavor."""
        resp, body = self.delete('flavors/%s/os-extra_specs/%s' %
                                 (flavor_id, key))
        self.validate_response(schema.unset_flavor_extra_specs, resp, body)
        return rest_client.ResponseBody(resp, body)

    def list_flavor_access(self, flavor_id):
        """Get flavor access information given the flavor id."""
        resp, body = self.get('flavors/%s/os-flavor-access' % flavor_id)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return rest_client.ResponseBody(resp, body)

    def add_flavor_access(self, flavor_id, tenant_id):
        """Add flavor access for the specified tenant."""
        post_body = {
            'addTenantAccess': {
                'tenant': tenant_id
            }
        }
        post_body = json.dumps(post_body)
        resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return rest_client.ResponseBody(resp, body)

    def remove_flavor_access(self, flavor_id, tenant_id):
        """Remove flavor access from the specified tenant."""
        post_body = {
            'removeTenantAccess': {
                'tenant': tenant_id
            }
        }
        post_body = json.dumps(post_body)
        resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return rest_client.ResponseBody(resp, body)
| {
"content_hash": "176bb35357d00792fc5766f43f055b57",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 79,
"avg_line_length": 41.36645962732919,
"alnum_prop": 0.5992492492492493,
"repo_name": "nuagenetworks/tempest",
"id": "50f1dccc9e8ca2ee0d07dc0579891a8f9f418860",
"size": "7296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/lib/services/compute/flavors_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from extended_signals.signals import form_pre_init, form_post_init, form_clean, form_pre_save, form_post_save
class FormSignalsMixin(object):
    """Mixin that broadcasts signals around a form's init, clean and save."""

    def __init__(self, *args, **kwargs):
        sender = self.__class__
        form_pre_init.send(sender=sender, instance=self, args=args, kwargs=kwargs)
        super().__init__(*args, **kwargs)
        form_post_init.send(sender=sender, instance=self, args=args, kwargs=kwargs)

    def clean(self):
        super().clean()
        form_clean.send(sender=self.__class__, instance=self)
        # cleaned_data may be absent if validation failed very early.
        return getattr(self, 'cleaned_data', {})

    def save(self, *args, **kwargs):
        sender = self.__class__
        form_pre_save.send(sender=sender, instance=self, args=args, kwargs=kwargs)
        super().save(*args, **kwargs)
        form_post_save.send(sender=sender, instance=self, args=args, kwargs=kwargs)
"content_hash": "c2621f0a520e854bd6eabcfbe789f3f2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 109,
"avg_line_length": 45.833333333333336,
"alnum_prop": 0.6472727272727272,
"repo_name": "biljettshop/django_extended_signals",
"id": "6b55b959e8f844479216f2315d06273b60b17dd5",
"size": "825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extended_signals/mixins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2283"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter submitreport.start_time and end_time to plain TimeField()s."""

    dependencies = [
        ('submit_reports', '0002_auto_20160324_2152'),
    ]

    operations = [
        migrations.AlterField(
            model_name='submitreport',
            name='end_time',
            field=models.TimeField(),
        ),
        migrations.AlterField(
            model_name='submitreport',
            name='start_time',
            field=models.TimeField(),
        ),
    ]
| {
"content_hash": "25b0c709aea60cbc05ef236688aa2a4e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 54,
"avg_line_length": 23.043478260869566,
"alnum_prop": 0.5641509433962264,
"repo_name": "ServiceLearningB/ServiceLearning",
"id": "e66212de112ef55a9dcc7895504416accf2f732b",
"size": "602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "submit_reports/migrations/0003_auto_20160324_2245.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46948"
},
{
"name": "HTML",
"bytes": "5674"
},
{
"name": "JavaScript",
"bytes": "89976"
},
{
"name": "Python",
"bytes": "39943"
}
],
"symlink_target": ""
} |
"""FeatureFlags API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class FeatureFlagsAPI(BaseCanvasAPI):
"""FeatureFlags API Version 1.0."""
def __init__(self, *args, **kwargs):
    """Init method for FeatureFlagsAPI."""
    super(FeatureFlagsAPI, self).__init__(*args, **kwargs)
    # Dedicated logger so feature-flag API traffic can be filtered separately.
    self.logger = logging.getLogger("py3canvas.FeatureFlagsAPI")
def list_features_courses(self, course_id):
    """
    List features.

    A paginated list of all features that apply to a given Account, Course, or User.
    """
    path = {"course_id": course_id}  # required path parameter
    data = {}
    params = {}
    self.logger.debug(
        "GET /api/v1/courses/{course_id}/features with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/courses/{course_id}/features".format(**path)
    return self.generic_request("GET", url, data=data, params=params, all_pages=True)
def list_features_accounts(self, account_id):
    """
    List features.

    A paginated list of all features that apply to a given Account, Course, or User.
    """
    path = {"account_id": account_id}  # required path parameter
    data = {}
    params = {}
    self.logger.debug(
        "GET /api/v1/accounts/{account_id}/features with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/accounts/{account_id}/features".format(**path)
    return self.generic_request("GET", url, data=data, params=params, all_pages=True)
def list_features_users(self, user_id):
    """
    List features.

    A paginated list of all features that apply to a given Account, Course, or User.
    """
    path = {"user_id": user_id}  # required path parameter
    data = {}
    params = {}
    self.logger.debug(
        "GET /api/v1/users/{user_id}/features with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/users/{user_id}/features".format(**path)
    return self.generic_request("GET", url, data=data, params=params, all_pages=True)
def list_enabled_features_courses(self, course_id):
    """
    List enabled features.

    A paginated list of all features that are enabled on a given Account, Course, or User.
    Only the feature names are returned.
    """
    path = {"course_id": course_id}  # required path parameter
    data = {}
    params = {}
    self.logger.debug(
        "GET /api/v1/courses/{course_id}/features/enabled with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/courses/{course_id}/features/enabled".format(**path)
    return self.generic_request("GET", url, data=data, params=params, no_data=True)
def list_enabled_features_accounts(self, account_id):
    """
    List enabled features.

    A paginated list of all features that are enabled on a given Account, Course, or User.
    Only the feature names are returned.
    """
    path = {"account_id": account_id}  # required path parameter
    data = {}
    params = {}
    self.logger.debug(
        "GET /api/v1/accounts/{account_id}/features/enabled with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/accounts/{account_id}/features/enabled".format(**path)
    return self.generic_request("GET", url, data=data, params=params, no_data=True)
def list_enabled_features_users(self, user_id):
    """
    List enabled features.

    A paginated list of all features that are enabled on a given Account, Course, or User.
    Only the feature names are returned.
    """
    path = {"user_id": user_id}  # required path parameter
    data = {}
    params = {}
    self.logger.debug(
        "GET /api/v1/users/{user_id}/features/enabled with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/users/{user_id}/features/enabled".format(**path)
    return self.generic_request("GET", url, data=data, params=params, no_data=True)
def list_environment_features(self):
    """
    List environment features.

    Return a hash of global feature settings that pertain to the
    Canvas user interface. This is the same information supplied to the
    web interface as +ENV.FEATURES+.
    """
    path = {}
    data = {}
    params = {}
    self.logger.debug(
        "GET /api/v1/features/environment with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/features/environment".format(**path)
    return self.generic_request("GET", url, data=data, params=params, no_data=True)
def get_feature_flag_courses(self, course_id, feature):
    """
    Get feature flag.

    Get the feature flag that applies to a given Account, Course, or User.
    The flag may be defined on the object or inherited from a parent account;
    inspect context_id/context_type on the result to tell which. Missing
    fields mean the global Canvas default applies.
    """
    path = {"course_id": course_id, "feature": feature}  # required path parameters
    data = {}
    params = {}
    self.logger.debug(
        "GET /api/v1/courses/{course_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/courses/{course_id}/features/flags/{feature}".format(**path)
    return self.generic_request("GET", url, data=data, params=params, single_item=True)
def get_feature_flag_accounts(self, account_id, feature):
    """
    Get feature flag.

    Get the feature flag that applies to a given Account, Course, or User.
    The flag may be defined on the object or inherited from a parent account;
    inspect context_id/context_type on the result to tell which. Missing
    fields mean the global Canvas default applies.
    """
    path = {"account_id": account_id, "feature": feature}  # required path parameters
    data = {}
    params = {}
    self.logger.debug(
        "GET /api/v1/accounts/{account_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/accounts/{account_id}/features/flags/{feature}".format(**path)
    return self.generic_request("GET", url, data=data, params=params, single_item=True)
def get_feature_flag_users(self, feature, user_id):
    """
    Get feature flag.

    Get the feature flag that applies to a given Account, Course, or User.
    The flag may be defined on the object or inherited from a parent account;
    inspect context_id/context_type on the result to tell which. Missing
    fields mean the global Canvas default applies.
    """
    path = {"user_id": user_id, "feature": feature}  # required path parameters
    data = {}
    params = {}
    self.logger.debug(
        "GET /api/v1/users/{user_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/users/{user_id}/features/flags/{feature}".format(**path)
    return self.generic_request("GET", url, data=data, params=params, single_item=True)
def set_feature_flag_courses(self, course_id, feature, state=None):
    """
    Set feature flag.

    Set a feature flag for a given Account, Course, or User. This call will
    fail if a parent account sets a feature flag for the same feature in any
    state other than "allowed".

    :param state: optional; one of "off", "allowed" (accounts only) or "on".
    """
    path = {"course_id": course_id, "feature": feature}  # required path parameters
    data = {}
    params = {}
    if state is not None:
        # Only these three states are accepted by the server; fail fast.
        self._validate_enum(state, ["off", "allowed", "on"])
        data["state"] = state
    self.logger.debug(
        "PUT /api/v1/courses/{course_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/courses/{course_id}/features/flags/{feature}".format(**path)
    return self.generic_request("PUT", url, data=data, params=params, single_item=True)
def set_feature_flag_accounts(self, account_id, feature, state=None):
    """
    Set feature flag.

    Set a feature flag for a given Account, Course, or User. This call will
    fail if a parent account sets a feature flag for the same feature in any
    state other than "allowed".

    :param state: optional; one of "off", "allowed" (accounts only) or "on".
    """
    path = {"account_id": account_id, "feature": feature}  # required path parameters
    data = {}
    params = {}
    if state is not None:
        # Only these three states are accepted by the server; fail fast.
        self._validate_enum(state, ["off", "allowed", "on"])
        data["state"] = state
    self.logger.debug(
        "PUT /api/v1/accounts/{account_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/accounts/{account_id}/features/flags/{feature}".format(**path)
    return self.generic_request("PUT", url, data=data, params=params, single_item=True)
def set_feature_flag_users(self, feature, user_id, state=None):
    """
    Set feature flag.

    Set a feature flag for a given Account, Course, or User. This call will
    fail if a parent account sets a feature flag for the same feature in any
    state other than "allowed".

    :param state: optional; one of "off", "allowed" (accounts only) or "on".
    """
    path = {"user_id": user_id, "feature": feature}  # required path parameters
    data = {}
    params = {}
    if state is not None:
        # Only these three states are accepted by the server; fail fast.
        self._validate_enum(state, ["off", "allowed", "on"])
        data["state"] = state
    self.logger.debug(
        "PUT /api/v1/users/{user_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    url = "/api/v1/users/{user_id}/features/flags/{feature}".format(**path)
    return self.generic_request("PUT", url, data=data, params=params, single_item=True)
def remove_feature_flag_courses(self, course_id, feature):
    """
    Remove feature flag.

    Remove feature flag for a given Account, Course, or User. (Note that the flag must
    be defined on the Account, Course, or User directly.) The object will then inherit
    the feature flags from a higher account, if any exist. If this flag was 'on' or 'off',
    then lower-level account flags that were masked by this one will apply again.
    """
    # Both path parameters are required; no query or form parameters exist.
    path = {"course_id": course_id, "feature": feature}
    data = {}
    params = {}

    self.logger.debug(
        "DELETE /api/v1/courses/{course_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    endpoint = "/api/v1/courses/{course_id}/features/flags/{feature}".format(**path)
    return self.generic_request(
        "DELETE", endpoint, data=data, params=params, single_item=True
    )
def remove_feature_flag_accounts(self, account_id, feature):
    """
    Remove feature flag.

    Remove feature flag for a given Account, Course, or User. (Note that the flag must
    be defined on the Account, Course, or User directly.) The object will then inherit
    the feature flags from a higher account, if any exist. If this flag was 'on' or 'off',
    then lower-level account flags that were masked by this one will apply again.
    """
    # Both path parameters are required; no query or form parameters exist.
    path = {"account_id": account_id, "feature": feature}
    data = {}
    params = {}

    self.logger.debug(
        "DELETE /api/v1/accounts/{account_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    endpoint = "/api/v1/accounts/{account_id}/features/flags/{feature}".format(**path)
    return self.generic_request(
        "DELETE", endpoint, data=data, params=params, single_item=True
    )
def remove_feature_flag_users(self, feature, user_id):
    """
    Remove feature flag.

    Remove feature flag for a given Account, Course, or User. (Note that the flag must
    be defined on the Account, Course, or User directly.) The object will then inherit
    the feature flags from a higher account, if any exist. If this flag was 'on' or 'off',
    then lower-level account flags that were masked by this one will apply again.
    """
    # Both path parameters are required; no query or form parameters exist.
    path = {"user_id": user_id, "feature": feature}
    data = {}
    params = {}

    self.logger.debug(
        "DELETE /api/v1/users/{user_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    endpoint = "/api/v1/users/{user_id}/features/flags/{feature}".format(**path)
    return self.generic_request(
        "DELETE", endpoint, data=data, params=params, single_item=True
    )
class Feature(BaseModel):
    """Feature Model.

    Local snapshot of a Canvas Feature. Setters only mutate this object
    and emit a warning; they never write back to the remote Canvas instance.
    """

    def __init__(
        self,
        feature=None,
        display_name=None,
        applies_to=None,
        enable_at=None,
        feature_flag=None,
        root_opt_in=None,
        beta=None,
        pending_enforcement=None,
        autoexpand=None,
        release_notes_url=None,
    ):
        """Init method for Feature class."""
        self._feature = feature
        self._display_name = display_name
        self._applies_to = applies_to
        self._enable_at = enable_at
        self._feature_flag = feature_flag
        self._root_opt_in = root_opt_in
        self._beta = beta
        self._pending_enforcement = pending_enforcement
        self._autoexpand = autoexpand
        self._release_notes_url = release_notes_url
        self.logger = logging.getLogger("py3canvas.Feature")

    def _warn_local_set(self, attr_name):
        """Warn that assigning *attr_name* does not propagate to Canvas.

        Uses Logger.warning because Logger.warn is a deprecated alias.
        The message text is unchanged from the previous per-setter copies.
        """
        self.logger.warning(
            "Setting values on %s will NOT update the remote Canvas instance.",
            attr_name,
        )

    @property
    def feature(self):
        """The symbolic name of the feature, used in FeatureFlags."""
        return self._feature

    @feature.setter
    def feature(self, value):
        """Setter for feature property."""
        self._warn_local_set("feature")
        self._feature = value

    @property
    def display_name(self):
        """The user-visible name of the feature."""
        return self._display_name

    @display_name.setter
    def display_name(self, value):
        """Setter for display_name property."""
        self._warn_local_set("display_name")
        self._display_name = value

    @property
    def applies_to(self):
        """The type of object the feature applies to (RootAccount, Account, Course, or User):
        * RootAccount features may only be controlled by flags on root accounts.
        * Account features may be controlled by flags on accounts and their parent accounts.
        * Course features may be controlled by flags on courses and their parent accounts.
        * User features may be controlled by flags on users and site admin only."""
        return self._applies_to

    @applies_to.setter
    def applies_to(self, value):
        """Setter for applies_to property."""
        self._warn_local_set("applies_to")
        self._applies_to = value

    @property
    def enable_at(self):
        """The date this feature will be globally enabled, or null if this is not planned. (This information is subject to change.)."""
        return self._enable_at

    @enable_at.setter
    def enable_at(self, value):
        """Setter for enable_at property."""
        self._warn_local_set("enable_at")
        self._enable_at = value

    @property
    def feature_flag(self):
        """The FeatureFlag that applies to the caller."""
        return self._feature_flag

    @feature_flag.setter
    def feature_flag(self, value):
        """Setter for feature_flag property."""
        self._warn_local_set("feature_flag")
        self._feature_flag = value

    @property
    def root_opt_in(self):
        """If true, a feature that is 'allowed' globally will be 'off' by default in root accounts. Otherwise, root accounts inherit the global 'allowed' setting, which allows sub-accounts and courses to turn features on with no root account action."""
        return self._root_opt_in

    @root_opt_in.setter
    def root_opt_in(self, value):
        """Setter for root_opt_in property."""
        self._warn_local_set("root_opt_in")
        self._root_opt_in = value

    @property
    def beta(self):
        """Whether the feature is a beta feature. If true, the feature may not be fully polished and may be subject to change in the future."""
        return self._beta

    @beta.setter
    def beta(self, value):
        """Setter for beta property."""
        self._warn_local_set("beta")
        self._beta = value

    @property
    def pending_enforcement(self):
        """Whether the feature is nearing completion and will be finalized at an upcoming date."""
        return self._pending_enforcement

    @pending_enforcement.setter
    def pending_enforcement(self, value):
        """Setter for pending_enforcement property."""
        self._warn_local_set("pending_enforcement")
        self._pending_enforcement = value

    @property
    def autoexpand(self):
        """Whether the details of the feature are autoexpanded on page load vs. the user clicking to expand."""
        return self._autoexpand

    @autoexpand.setter
    def autoexpand(self, value):
        """Setter for autoexpand property."""
        self._warn_local_set("autoexpand")
        self._autoexpand = value

    @property
    def release_notes_url(self):
        """A URL to the release notes describing the feature."""
        return self._release_notes_url

    @release_notes_url.setter
    def release_notes_url(self, value):
        """Setter for release_notes_url property."""
        self._warn_local_set("release_notes_url")
        self._release_notes_url = value
class Featureflag(BaseModel):
    """Featureflag Model.

    Local snapshot of a Canvas FeatureFlag. Setters only mutate this object
    and emit a warning; they never write back to the remote Canvas instance.
    """

    def __init__(
        self, context_type=None, context_id=None, feature=None, state=None, locked=None
    ):
        """Init method for Featureflag class."""
        self._context_type = context_type
        self._context_id = context_id
        self._feature = feature
        self._state = state
        self._locked = locked
        self.logger = logging.getLogger("py3canvas.Featureflag")

    def _warn_local_set(self, attr_name):
        """Warn that assigning *attr_name* does not propagate to Canvas.

        Uses Logger.warning because Logger.warn is a deprecated alias.
        The message text is unchanged from the previous per-setter copies.
        """
        self.logger.warning(
            "Setting values on %s will NOT update the remote Canvas instance.",
            attr_name,
        )

    @property
    def context_type(self):
        """The type of object to which this flag applies (Account, Course, or User). (This field is not present if this FeatureFlag represents the global Canvas default)."""
        return self._context_type

    @context_type.setter
    def context_type(self, value):
        """Setter for context_type property."""
        self._warn_local_set("context_type")
        self._context_type = value

    @property
    def context_id(self):
        """The id of the object to which this flag applies (This field is not present if this FeatureFlag represents the global Canvas default)."""
        return self._context_id

    @context_id.setter
    def context_id(self, value):
        """Setter for context_id property."""
        self._warn_local_set("context_id")
        self._context_id = value

    @property
    def feature(self):
        """The feature this flag controls."""
        return self._feature

    @feature.setter
    def feature(self, value):
        """Setter for feature property."""
        self._warn_local_set("feature")
        self._feature = value

    @property
    def state(self):
        """The policy for the feature at this context. can be 'off', 'allowed', 'allowed_on', or 'on'."""
        return self._state

    @state.setter
    def state(self, value):
        """Setter for state property."""
        self._warn_local_set("state")
        self._state = value

    @property
    def locked(self):
        """If set, this feature flag cannot be changed in the caller's context because the flag is set 'off' or 'on' in a higher context."""
        return self._locked

    @locked.setter
    def locked(self, value):
        """Setter for locked property."""
        self._warn_local_set("locked")
        self._locked = value
| {
"content_hash": "3f7e8ecc7a1ef8057030bfb61bfe79ac",
"timestamp": "",
"source": "github",
"line_count": 837,
"max_line_length": 252,
"avg_line_length": 32.0884109916368,
"alnum_prop": 0.5553652543003946,
"repo_name": "tylerclair/py3canvas",
"id": "a7b2c407766904a4d3f6b0ee56f143c800bab9da",
"size": "26858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py3canvas/apis/feature_flags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1988347"
}
],
"symlink_target": ""
} |
from direct.distributed.ClockDelta import *
from direct.showbase import DirectObject
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
import random
import TreasurePlannerAI
class RegenTreasurePlannerAI(TreasurePlannerAI.TreasurePlannerAI):
    """Treasure planner that keeps a zone stocked with up to maxTreasures
    treasures, respawning one every spawnInterval seconds. (Python 2 code.)
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('RegenTreasurePlannerAI')

    def __init__(self, zoneId, treasureType, taskName, spawnInterval, maxTreasures, callback = None):
        """Store the spawn settings; the task name is made unique per zone."""
        TreasurePlannerAI.TreasurePlannerAI.__init__(self, zoneId, treasureType, callback)
        # Suffix with the zone id so each zone gets its own named task.
        self.taskName = '%s-%s' % (taskName, zoneId)
        self.spawnInterval = spawnInterval
        self.maxTreasures = maxTreasures

    def start(self):
        """Fill the zone to capacity immediately, then begin periodic respawning."""
        self.preSpawnTreasures()
        self.startSpawning()

    def stop(self):
        """Stop the periodic respawn task."""
        self.stopSpawning()

    def stopSpawning(self):
        # NOTE(review): taskMgr is a global not defined in this module --
        # presumably Panda3D's task manager installed into builtins; confirm.
        taskMgr.remove(self.taskName)

    def startSpawning(self):
        """(Re)start the respawn task, cancelling any previous one first."""
        self.stopSpawning()
        taskMgr.doMethodLater(self.spawnInterval, self.upkeepTreasurePopulation, self.taskName)

    def upkeepTreasurePopulation(self, task):
        """Task body: spawn one treasure if below capacity, then reschedule itself."""
        if self.numTreasures() < self.maxTreasures:
            self.placeRandomTreasure()
        # Reschedules manually, then returns Task.done to end this invocation.
        taskMgr.doMethodLater(self.spawnInterval, self.upkeepTreasurePopulation, self.taskName)
        return Task.done

    def placeRandomTreasure(self):
        """Place one treasure at a randomly chosen empty spawn point."""
        self.notify.debug('Placing a Treasure...')
        spawnPointIndex = self.nthEmptyIndex(random.randrange(self.countEmptySpawnPoints()))
        self.placeTreasure(spawnPointIndex)

    def preSpawnTreasures(self):
        """Spawn maxTreasures treasures immediately (xrange: Python 2)."""
        for i in xrange(self.maxTreasures):
            self.placeRandomTreasure()
| {
"content_hash": "db562689f65d15e94d5679c5985ac033",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 101,
"avg_line_length": 37.27272727272727,
"alnum_prop": 0.725,
"repo_name": "ToontownUprising/src",
"id": "a3aa4dc05f007e12586f6dcb62f8c8fb673247d7",
"size": "1640",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "toontown/safezone/RegenTreasurePlannerAI.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "36"
},
{
"name": "Python",
"bytes": "16244807"
},
{
"name": "Shell",
"bytes": "11615"
}
],
"symlink_target": ""
} |
from google.analytics import admin_v1alpha
def sample_get_custom_dimension():
    """Fetch a single CustomDimension by resource name and print it."""
    # Build the client and the request, then issue the RPC.
    client = admin_v1alpha.AnalyticsAdminServiceClient()
    request = admin_v1alpha.GetCustomDimensionRequest(name="name_value")
    response = client.get_custom_dimension(request=request)
    # Handle the response
    print(response)
# [END analyticsadmin_v1alpha_generated_AnalyticsAdminService_GetCustomDimension_sync]
| {
"content_hash": "88860ae623f1d2dee2b4d79ee0f6d0a7",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 86,
"avg_line_length": 26.526315789473685,
"alnum_prop": 0.7361111111111112,
"repo_name": "googleapis/python-analytics-admin",
"id": "f1b219e8d78d8fc5f78802524857279c112db1a3",
"size": "1921",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/analyticsadmin_v1alpha_generated_analytics_admin_service_get_custom_dimension_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "5576405"
},
{
"name": "Shell",
"bytes": "30687"
}
],
"symlink_target": ""
} |
import sys
from gui.application import Application
# Entry point: build the GUI application and exit with its run() status.
# NOTE(review): this runs at import time -- there is no __main__ guard, so
# the module is intended to be executed as a script only.
app = Application()
sys.exit(app.run())
| {
"content_hash": "911b5858dd749e45db8bd8dc31b03e0a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 39,
"avg_line_length": 15.5,
"alnum_prop": 0.7634408602150538,
"repo_name": "coolhandmook/trayjenkins",
"id": "f6a477c45efb517f41c2ffbb5c5db569d7b675b1",
"size": "112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trayjenkins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74341"
},
{
"name": "Shell",
"bytes": "192"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add an optional Organization_Logo_Image upload field to SurveyPanel."""

    dependencies = [
        ('cdadmap', '0011_auto_20150624_1138'),
    ]

    operations = [
        migrations.AddField(
            model_name='surveypanel',
            name='Organization_Logo_Image',
            # Optional image; uploads land under a timestamped img/ directory.
            field=models.ImageField(null=True, upload_to=b'img/%Y_%m_%d_%h_%M_%s', blank=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "2bf3b4f3669a9552d6dbc64af4ae4f17",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 95,
"avg_line_length": 25.157894736842106,
"alnum_prop": 0.5962343096234309,
"repo_name": "NiJeLorg/CDADMap",
"id": "3bb47943d637103f6fa2c9fa6b6da667cf6e817a",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cdad/cdadmap/migrations/0012_surveypanel_organization_logo_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31722"
},
{
"name": "HTML",
"bytes": "114105"
},
{
"name": "JavaScript",
"bytes": "134735"
},
{
"name": "Python",
"bytes": "165621"
},
{
"name": "Shell",
"bytes": "1691"
}
],
"symlink_target": ""
} |
"""
This module provides testing functionality of the Apache Solr Init Action.
"""
import os
import pkg_resources
from absl.testing import absltest
from absl.testing import parameterized
from integration_tests.dataproc_test_case import DataprocTestCase
class SolrTestCase(DataprocTestCase):
    """Integration test for the Apache Solr init action."""

    COMPONENT = 'solr'
    INIT_ACTIONS = ['solr/solr.sh']
    TEST_SCRIPT_FILE_NAME = 'verify_solr.py'

    def verify_instance(self, name):
        """Upload the verification script to *name*, run it, then remove it."""
        local_script = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            self.TEST_SCRIPT_FILE_NAME)
        self.upload_test_file(local_script, name)
        self.__run_test_script(name)
        self.remove_test_script(self.TEST_SCRIPT_FILE_NAME, name)

    def __run_test_script(self, name):
        """Execute the uploaded verification script on the instance."""
        command = "python3 {}".format(self.TEST_SCRIPT_FILE_NAME)
        self.assert_instance_command(name, command)

    @parameterized.parameters(
        ("SINGLE", ["m"]),
        ("STANDARD", ["m"]),
        ("HA", ["m-0"]),
    )
    def test_solr(self, configuration, machine_suffixes):
        """Create a cluster per configuration and verify Solr on each machine."""
        if self.getImageOs() == 'rocky':
            self.skipTest("Not supported in Rocky Linux-based images")
        # Skip on 2.0+ version of Dataproc because it's not supported
        if self.getImageVersion() >= pkg_resources.parse_version("2.0"):
            self.skipTest("Not supported in 2.0+ images")
        self.createCluster(configuration, self.INIT_ACTIONS)
        for suffix in machine_suffixes:
            instance = "{}-{}".format(self.getClusterName(), suffix)
            self.verify_instance(instance)
# Run the test suite under absl's test runner when executed directly.
if __name__ == '__main__':
    absltest.main()
| {
"content_hash": "80dacdbae35fef7df355926b5e8b6808",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 74,
"avg_line_length": 32.15686274509804,
"alnum_prop": 0.6146341463414634,
"repo_name": "GoogleCloudDataproc/initialization-actions",
"id": "609801de842a85fb8ed6e39da86cdc81ef6e94e7",
"size": "1640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solr/test_solr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "616"
},
{
"name": "Python",
"bytes": "143738"
},
{
"name": "R",
"bytes": "86"
},
{
"name": "Scala",
"bytes": "2116"
},
{
"name": "Shell",
"bytes": "349642"
},
{
"name": "Starlark",
"bytes": "16664"
}
],
"symlink_target": ""
} |
"""
Django settings for polyclinic project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'srhq*(rwy3z)l+uf5ivaq_)nrq*^#wl$-#fu9j=+zs6eivm5fz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'appointments',
'bootstrap3',
'datetimewidget',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'polyclinic.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'polyclinic.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
BOOTSTRAP3 = {
'jquery_url': '//code.jquery.com/jquery.min.js',
}
STATIC_URL = '/static/'
| {
"content_hash": "3c64f19492aa009cb1185cd64bd247a0",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 71,
"avg_line_length": 25.87037037037037,
"alnum_prop": 0.6882605583392984,
"repo_name": "thefivekey/django-doctor-appointment",
"id": "deb3e87e7428dc533f1490103e281457f06c458b",
"size": "2794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polyclinic/polyclinic/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "686"
},
{
"name": "Python",
"bytes": "11149"
},
{
"name": "Shell",
"bytes": "568"
}
],
"symlink_target": ""
} |
from capabilities.capability import capability
import subprocess # used for bash commands (when required), unix-only
from pipes import quote # used to sanitize bash input when complex commands are required, unix-only
class tasks(capability):
    """Capability that measures a project's disk usage in megabytes.

    NOTE(review): the methods below are declared without ``self`` or
    @staticmethod, so they are presumably invoked as plain functions by the
    capability framework -- confirm before refactoring.
    """

    # the standard command for getting folder size is filtered down to the mere number
    def analyze(projectPath):
        """Return [total, project, hgsize] in MB (rounded ints) for projectPath.

        Shells out to ``du``/``sed`` (unix-only); the project size excludes
        .hg, which is measured separately as the repo size. ``.strip`` removes
        the tab/'total'/newline characters surrounding du's summary number.
        """
        # total size
        total1 = subprocess.check_output( 'du -mcs %s | sed "2q;d"' % \
            quote(projectPath), shell=True ).decode('utf-8').strip('\ttotal\n')
        total = int(round(float(total1)))
        # project size
        project1 = subprocess.check_output( 'du --exclude=.hg -mcs %s | sed "2q;d"' % \
            quote(projectPath), shell=True ).decode('utf-8').strip('\ttotal\n')
        project = int(round(float(project1)))
        # mercurial repo size
        hgsize1 = subprocess.check_output( 'du -mcs %s/.hg | sed "2q;d"' % \
            quote(projectPath), shell=True ).decode('utf-8').strip('\ttotal\n')
        hgsize = int(round(float(hgsize1)))
        return [total, project, hgsize]

    def updateDb(dbConn, py_name, value):
        """Write ``value`` == [total, project, repo] sizes for project
        ``py_name`` into project.metadata via parameterized SQL.
        """
        # print(py_name +" "+ value)
        cur = dbConn.cursor()  # cursor to make changes
        cur.execute( """UPDATE project.metadata SET
            totalSizeInMB = %s,
            projectSizeInMB = %s,
            repoSizeInMB = %s
            WHERE name = %s;""", (value[0], value[1], value[2], py_name) )
        dbConn.commit()  # save changes to db

    def getColumns():
        """Column name/type pairs this capability maintains in the metadata table."""
        return [
            ['totalSizeInMB', 'int'],
            ['projectSizeInMB', 'int'],
            ['repoSizeInMB', 'int']
        ]

    # end of tasks
| {
"content_hash": "796c71d9de65ea45ef5c3f88b6952fd9",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 99,
"avg_line_length": 37.97674418604651,
"alnum_prop": 0.6068585425597061,
"repo_name": "Nateowami/flex-languagedepot-metadata",
"id": "4f2aab2f9228ac8874f4564fc53a2dc5b878ebd4",
"size": "1633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/capabilities/sizeInMegabytes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lasso",
"bytes": "156547"
},
{
"name": "Python",
"bytes": "36326"
},
{
"name": "Shell",
"bytes": "3003"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .core import UnitedStates
class Minnesota(UnitedStates):
    """Minnesota"""
    # Adds the Friday after Thanksgiving and drops Columbus Day relative to
    # the UnitedStates base calendar.
    include_thanksgiving_friday = True
    include_columbus_day = False
| {
"content_hash": "4a9e78c1b27aeef9db297e6bc95ceafd",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 66,
"avg_line_length": 26.6,
"alnum_prop": 0.6729323308270677,
"repo_name": "sayoun/workalendar",
"id": "003a97a1f67231845a42dcd25a50389600fd709a",
"size": "290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workalendar/usa/minnesota.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "383844"
}
],
"symlink_target": ""
} |
"""Definition of Legendre polynomial class"""
from utils.poly.Jacobi import Jacobi
class Legendre(Jacobi):
    """Legendre polynomial

    The special case of a Jacobi polynomial with both weight parameters
    set to zero. Its attributions are the same as the Polynomial class.
    """

    def __init__(self, _n):
        """Initialize the instance of a Legendre polynomial.

        Args:
            _n: the order of the Legendre polynomial
        """
        # Legendre == Jacobi with alpha = beta = 0.
        super().__init__(_n, 0, 0)

    def __repr__(self):
        """Return an eval-style representation, e.g. ``Legendre(3)``."""
        # Use the class *name*; formatting the class object itself would
        # render as "<class '...'>(n)", which is not a useful repr.
        return "{0}({1})".format(type(self).__name__, self.n)
| {
"content_hash": "b35e1a8bc72921be419d133c14f75d64",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 58,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.56640625,
"repo_name": "piyueh/SEM-Toolbox",
"id": "15e8fb6b1f06c399d6e57ae1ac0c6dcc4c45e609",
"size": "682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/poly/Legendre.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1280523"
},
{
"name": "Python",
"bytes": "87772"
}
],
"symlink_target": ""
} |
import argparse, os

# Command Line Arguments
parser = argparse.ArgumentParser(description='This script converts a tab file to a json fixture for django.')
parser.add_argument('-a', '--appname', help='Name of the app that owns the model: myawesomeapp', required=True)
parser.add_argument('-m', '--modelname', help='Name of the model the fixture is for: mydatamodel', required=True)
parser.add_argument('-i', '--input', help='Path to output json file: /users/name/jsonFileOutput.json', required=True)
parser.add_argument('-o', '--output', help='Path for output json file: /users/name/jsonFileOutput.json', required=True)
args = parser.parse_args()

# Arg Vars
model = args.appname + "." + args.modelname
ifile = args.input
ofile = args.output

with open(ifile, 'r') as fin:
    # store the file header info for later
    header = fin.readline().strip().split("\t")

    # clear out the contents of any pre-existing output file
    with open(ofile, 'wb') as fout:
        fout.write("[")

    # append the rest of the output to the json file
    with open(ofile, 'a') as fout:
        # row_num drives the primary key; a separate name for the column
        # index fixes the old bug where the inner loop shadowed `index`
        # and error reports pointed at the column, not the line.
        for row_num, line in enumerate(fin):
            values = line.strip().split("\t")
            fout.write("\n\t{\n\t\t\"model\": \"%s\",\n\t\t\"pk\": %d,\n\t\t\"fields\": {" % (model, row_num + 1))
            for col, field in enumerate(header):
                # rows with missing trailing columns get empty-string values
                try:
                    value = values[col]
                except IndexError:
                    value = ""
                # last field omits the trailing comma and closes the object
                if col < len(header) - 1:
                    fout.write('\n\t\t\t\"%s\": \"%s\",' % (field, value))
                else:
                    fout.write("\n\t\t\t\"%s\": \"%s\"\n\t\t}\n\t}," % (field, value))

# clean up the trailing comma and add the closing bracket
with open(ofile, 'rb+') as filehandle:
    filehandle.seek(-1, os.SEEK_END)
    filehandle.truncate()
    filehandle.write("\n]")
| {
"content_hash": "8cf4c13298770a4c0b8536b21eb7df18",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 119,
"avg_line_length": 50.44,
"alnum_prop": 0.5344964314036479,
"repo_name": "HalescodeLLC/django-munsell",
"id": "46124ccc452afe4820b7fb9566352bb5cf042362",
"size": "2522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/tab_to_djFixture.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53466"
},
{
"name": "JavaScript",
"bytes": "143778"
},
{
"name": "Python",
"bytes": "40532"
},
{
"name": "Shell",
"bytes": "5214"
}
],
"symlink_target": ""
} |
import SWIGParser as parser
def dir_contents():
    """Print the attributes exposed by the SWIG parser module and its ExtPos class."""
    print 'parser contents:', dir(parser)
    print
    print 'parser.ExtPos contents:', dir(parser.ExtPos)
def display_parses(parses):
    """Print each (score, tree) parse: index, score, raw tree, pretty-printed tree."""
    for i, (score, tree) in enumerate(parses):
        print i, score
        print tree
        print tree.toStringPrettyPrint()
        print
def initialize(n=10):
    """Load the English model and configure the parser for n-best parsing."""
    # this assumes we're in PARSE/ (model path is relative)
    parser.loadModel("../DATA/EN")
    parser.setOptions('En', False, n, True, 21, 0, 0)
def test_tokenizer():
    """Tokenize a sample <s>...</s> string, print every token, return the SentRep."""
    # 399 is presumably a maximum length limit for tokenization -- confirm
    # against the SWIG tokenize() signature.
    sr = parser.tokenize("junk <s> It's some text to tokenize, if you feel like it -- or not. </s>", 399)
    print 'sr %r' % str(sr)
    print 'sr length', len(sr)
    for i in range(len(sr)):
        print 'sr word', i, sr.getWord(i).lexeme()
    return sr
def test_parse():
    """Parse both a pre-tokenized sentence and a tokenizer-produced one, printing results."""
    sr1 = parser.SentRep(['These', 'are', 'tokens', '.'])
    sr2 = test_tokenizer()
    for sr in (sr1, sr2):
        parses = parser.parse(sr)
        display_parses(parses)
        print '---'
def test_as_nbest_list():
    """Print the parses of a simple sentence formatted as an n-best list."""
    sr1 = parser.SentRep(['These', 'are', 'tokens', '.'])
    parses = parser.parse(sr1)
    print parser.asNBestList(parses, 'test_as_nbest_list_sentence')
def test_extpos():
    """Parse the ambiguous word 'record' unconstrained, then constrained to NN and to VB."""
    sr1 = parser.SentRep(['record'])
    print 'Unconstrained'
    display_parses(parser.parse(sr1))
    print 'NN'
    ext_pos1 = parser.ExtPos()
    ext_pos1.addTagConstraints(parser.StringVector(['NN']))
    display_parses(parser.parse(sr1, ext_pos1, None))
    print 'VB'
    ext_pos2 = parser.ExtPos()
    ext_pos2.addTagConstraints(parser.StringVector(['VB']))
    display_parses(parser.parse(sr1, ext_pos2, None))
def test_multiword_extpos():
    """Constrain the tag of 'waffles' (word 3) in a garden-path sentence.

    One empty StringVector is added per word; only the third word gets
    actual tag constraints in each case.
    """
    sr1 = parser.SentRep('British left waffles on Falklands .'.split())
    print 'waffles = [anything]:'
    display_parses(parser.parse(sr1))
    if 1:
        print 'waffles = VBZ/VBD/VB:'
        ext_pos = parser.ExtPos()
        ext_pos.addTagConstraints(parser.StringVector([]))
        ext_pos.addTagConstraints(parser.StringVector([]))
        ext_pos.addTagConstraints(parser.StringVector(['VBZ', 'VBD', 'VB']))
        ext_pos.addTagConstraints(parser.StringVector([]))
        ext_pos.addTagConstraints(parser.StringVector([]))
        ext_pos.addTagConstraints(parser.StringVector([]))
        display_parses(parser.parse(sr1, ext_pos, None))
    print 'waffles = NNS:'
    ext_pos = parser.ExtPos()
    ext_pos.addTagConstraints(parser.StringVector([]))
    ext_pos.addTagConstraints(parser.StringVector([]))
    ext_pos.addTagConstraints(parser.StringVector(['NNS']))
    ext_pos.addTagConstraints(parser.StringVector([]))
    ext_pos.addTagConstraints(parser.StringVector([]))
    ext_pos.addTagConstraints(parser.StringVector([]))
    display_parses(parser.parse(sr1, ext_pos, None))
    print 'waffles = NN/NNS:'
    ext_pos = parser.ExtPos()
    ext_pos.addTagConstraints(parser.StringVector([]))
    ext_pos.addTagConstraints(parser.StringVector([]))
    ext_pos.addTagConstraints(parser.StringVector(['NN', 'NNS']))
    ext_pos.addTagConstraints(parser.StringVector([]))
    ext_pos.addTagConstraints(parser.StringVector([]))
    ext_pos.addTagConstraints(parser.StringVector([]))
    display_parses(parser.parse(sr1, ext_pos, None))
if __name__ == "__main__":
dir_contents()
if 1:
initialize(n=5)
test_as_nbest_list()
for x in range(1000): # memory leak detection
print 'iteration', x
test_tokenizer()
test_parse()
test_multiword_extpos()
test_extpos()
| {
"content_hash": "c346d2f571763bd4cdacea6df7919650",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 105,
"avg_line_length": 33.74528301886792,
"alnum_prop": 0.6276209113782499,
"repo_name": "dmcc/bllip-parser",
"id": "41d61c1841379ccbbf68a3c85829008ac77f8a2d",
"size": "4124",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "first-stage/PARSE/swig/python/test/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "406"
},
{
"name": "C",
"bytes": "682774"
},
{
"name": "C++",
"bytes": "2188548"
},
{
"name": "Common Lisp",
"bytes": "1233"
},
{
"name": "GAP",
"bytes": "35887650"
},
{
"name": "Java",
"bytes": "9098"
},
{
"name": "Lex",
"bytes": "16820"
},
{
"name": "M",
"bytes": "876"
},
{
"name": "Makefile",
"bytes": "51245"
},
{
"name": "Objective-C",
"bytes": "2633"
},
{
"name": "Perl6",
"bytes": "484"
},
{
"name": "Python",
"bytes": "254799"
},
{
"name": "Rebol",
"bytes": "1217"
},
{
"name": "Ruby",
"bytes": "195"
},
{
"name": "Shell",
"bytes": "20258"
}
],
"symlink_target": ""
} |
"""resource property
Revision ID: 02e2f2186d98
Revises: f4084140f608
Create Date: 2020-04-17 15:51:40.542459
"""
# revision identifiers, used by Alembic.
# These link the migration into the chain; do not edit after deployment.
revision = '02e2f2186d98'
down_revision = 'f4084140f608'
import uuid
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Create ``resource_properties`` and re-point extra capabilities at it.

    1. Create the ``resource_properties`` table, unique on
       (resource_type, property_name).
    2. On non-SQLite backends only: copy every distinct capability name
       from ``computehost_extra_capabilities`` into the new table, add a
       ``property_id`` column referencing it, backfill that column from
       ``capability_name``, then drop ``capability_name``.

    The raw SQL below uses MySQL multi-table UPDATE syntax, hence the
    SQLite guard.
    """
    op.create_table('resource_properties',
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('resource_type', sa.String(255), nullable=False),
                    sa.Column('property_name', sa.String(255),
                              nullable=False),
                    sa.Column('private', sa.Boolean, nullable=False,
                              server_default=sa.false()),
                    sa.Column('created_at', sa.DateTime(), nullable=True),
                    sa.Column('updated_at', sa.DateTime(), nullable=True),
                    sa.PrimaryKeyConstraint('id'),
                    sa.UniqueConstraint('resource_type', 'property_name'))
    if op.get_bind().engine.name != 'sqlite':
        connection = op.get_bind()
        # NOTE(review): "physical:host" is double-quoted; on default-mode
        # MySQL that is a string literal, so every row appears to get the
        # constant resource_type 'physical:host' — confirm intent.
        host_query = connection.execute("""
            SELECT DISTINCT "physical:host", capability_name
            FROM computehost_extra_capabilities;""")
        # One (uuid, resource_type, property_name) triple per distinct
        # capability found above.
        capability_values = [
            (str(uuid.uuid4()), resource_type, capability_name)
            for resource_type, capability_name
            in host_query.fetchall()]
        if capability_values:
            insert = """
                INSERT INTO resource_properties
                (id, resource_type, property_name)
                VALUES {};"""
            # NOTE(review): relies on Python's tuple repr producing valid
            # SQL VALUES groups; values come from our own DB, but this is
            # fragile against quotes in capability names.
            connection.execute(
                insert.format(', '.join(map(str, capability_values))))
        op.add_column('computehost_extra_capabilities',
                      sa.Column('property_id', sa.String(length=255),
                                nullable=False))
        # Backfill the new FK column by matching on the old name column.
        connection.execute("""
            UPDATE computehost_extra_capabilities c
            LEFT JOIN resource_properties e
            ON e.property_name = c.capability_name
            SET c.property_id = e.id;""")
        op.create_foreign_key('computehost_resource_property_id_fk',
                              'computehost_extra_capabilities',
                              'resource_properties', ['property_id'], ['id'])
        op.drop_column('computehost_extra_capabilities', 'capability_name')
def downgrade():
    """Restore ``capability_name`` and drop ``resource_properties``.

    Inverse of :func:`upgrade`: re-add the ``capability_name`` column,
    backfill it from ``resource_properties`` (MySQL-style UPDATE, so
    guarded against SQLite), then drop the FK, the ``property_id``
    column, and finally the ``resource_properties`` table.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('computehost_extra_capabilities',
                  sa.Column('capability_name', mysql.VARCHAR(length=64),
                            nullable=False))
    if op.get_bind().engine.name != 'sqlite':
        connection = op.get_bind()
        connection.execute("""
            UPDATE computehost_extra_capabilities c
            LEFT JOIN resource_properties e
            ON e.id=c.property_id
            SET c.capability_name = e.property_name;""")
        # The FK only exists on non-SQLite backends (see upgrade()).
        op.drop_constraint('computehost_resource_property_id_fk',
                           'computehost_extra_capabilities',
                           type_='foreignkey')
    op.drop_column('computehost_extra_capabilities', 'property_id')
    op.drop_table('resource_properties')
| {
"content_hash": "401243ea91f305f527c15fd4a1f3f180",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 38.151162790697676,
"alnum_prop": 0.5672051203901249,
"repo_name": "openstack/blazar",
"id": "89d30170f48849c3927c648724978d60d68d6922",
"size": "3868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blazar/db/migration/alembic_migrations/versions/02e2f2186d98_resource_property.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1014"
},
{
"name": "Python",
"bytes": "1165064"
},
{
"name": "Shell",
"bytes": "10357"
}
],
"symlink_target": ""
} |
from flask import Blueprint, Response, jsonify
from application.helpers import crossdomain
from modules.job_table.controllers import get_job_table_filter, get_job_table_query
# Blueprint grouping the job-table JSON API endpoints.
job_table_api_pages = Blueprint('job_table_api', __name__
                                , template_folder='templates')
@job_table_api_pages.route("/common")
@crossdomain(origin='*')
def json_jobs_by_account() -> Response:
    """Return the filtered job table as a JSON array of job dicts.

    Builds the filter from the current request, runs the job-table
    query, and serializes the first element of each result row via
    its ``to_dict()``. CORS is open to any origin.
    """
    # Renamed from `filter` to avoid shadowing the builtin.
    table_filter = get_job_table_filter()
    query = get_job_table_query(table_filter)
    return jsonify([row[0].to_dict() for row in query.all()])
| {
"content_hash": "5f1e2cd8bc6a58b8233ea392029d72c1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 83,
"avg_line_length": 30,
"alnum_prop": 0.7352941176470589,
"repo_name": "srcc-msu/job_statistics",
"id": "4be6b8d879cef48b16e805a03d9f3aded757721e",
"size": "510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/job_table/api_controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19266"
},
{
"name": "HTML",
"bytes": "62218"
},
{
"name": "JavaScript",
"bytes": "97906"
},
{
"name": "Makefile",
"bytes": "342"
},
{
"name": "Python",
"bytes": "102112"
}
],
"symlink_target": ""
} |
"""JOSE interfaces."""
import abc
import collections
import json
import six
from acme.jose import errors
from acme.jose import util
# pylint: disable=no-self-argument,no-method-argument,no-init,inherit-non-class
# pylint: disable=too-few-public-methods
@six.add_metaclass(abc.ABCMeta)
class JSONDeSerializable(object):
    # pylint: disable=too-few-public-methods
    """Interface for (de)serializable JSON objects.

    The standard :mod:`json` machinery only translates between JSON
    documents and basic Python types (``dict``, ``list``, strings,
    numbers, ...). This interface adds two concepts on top of that:

    serialization
        Turning an arbitrary Python object into something encodable as
        JSON. **Full serialization** yields only basic types, while
        **partial serialization** (:meth:`to_partial_json`) may still
        contain other :class:`JSONDeSerializable` objects.

    deserialization
        Turning a decoded basic-type object (``jobj``) back into an
        arbitrary Python object (:meth:`from_json`).

    Since :func:`json.dumps` applies its ``default`` callback
    recursively, ``default`` only needs to perform partial
    serialization; :meth:`json_dumps` therefore dumps with ``default``
    set to :meth:`json_dump_default`. To make the documentation of the
    individual methods concrete, consider this imaginary example::

        class Foo(JSONDeSerializable):
            def to_partial_json(self):
                return 'foo'

            @classmethod
            def from_json(cls, jobj):
                return Foo()

        class Bar(JSONDeSerializable):
            def to_partial_json(self):
                return [Foo(), Foo()]

            @classmethod
            def from_json(cls, jobj):
                return Bar()

    """

    @abc.abstractmethod
    def to_partial_json(self):  # pragma: no cover
        """Partially serialize.

        Following the example, **partial serialization** means::

            assert isinstance(Bar().to_partial_json()[0], Foo)
            assert isinstance(Bar().to_partial_json()[1], Foo)
            # in particular...
            assert Bar().to_partial_json() != ['foo', 'foo']

        :raises acme.jose.errors.SerializationError:
            in case of any serialization error.

        :returns: Partially serializable object.

        """
        raise NotImplementedError()

    def to_json(self):
        """Fully serialize.

        Recursively replaces every :class:`JSONDeSerializable` with its
        partial serialization until only basic types remain::

            assert Bar().to_json() == ['foo', 'foo']

        :raises acme.jose.errors.SerializationError:
            in case of any serialization error.

        :returns: Fully serialized object.

        """
        def _flatten(node):
            if isinstance(node, JSONDeSerializable):
                return _flatten(node.to_partial_json())
            # Strings are Sequences too, so they must be tested before
            # the generic Sequence branch below.
            if isinstance(node, six.string_types):
                return node
            if isinstance(node, list):
                return [_flatten(item) for item in node]
            if isinstance(node, collections.Sequence):
                # Default to tuple so the result stays hashable — a
                # Mapping key must not turn into an unhashable list.
                return tuple(_flatten(item) for item in node)
            if isinstance(node, collections.Mapping):
                return dict((_flatten(key), _flatten(value))
                            for key, value in six.iteritems(node))
            return node

        return _flatten(self)

    @util.abstractclassmethod
    def from_json(cls, jobj):  # pylint: disable=unused-argument
        """Deserialize a decoded JSON document.

        :param jobj: Python object, composed of only other basic data
            types, as decoded from JSON document. Not necessarily
            :class:`dict` (as decoded from "JSON object" document).

        :raises acme.jose.errors.DeserializationError:
            if decoding was unsuccessful, e.g. in case of unparseable
            X509 certificate, or wrong padding in JOSE base64 encoded
            string, etc.

        """
        # TypeError: Can't instantiate abstract class <cls> with
        # abstract methods from_json, to_partial_json
        return cls()  # pylint: disable=abstract-class-instantiated

    @classmethod
    def json_loads(cls, json_string):
        """Deserialize from JSON document string."""
        try:
            decoded = json.loads(json_string)
        except ValueError as error:
            raise errors.DeserializationError(error)
        return cls.from_json(decoded)

    def json_dumps(self, **kwargs):
        """Dump to JSON string using proper serializer.

        :returns: JSON document string.
        :rtype: str

        """
        return json.dumps(self, default=self.json_dump_default, **kwargs)

    def json_dumps_pretty(self):
        """Dump the object to pretty JSON document string.

        :rtype: str

        """
        return self.json_dumps(sort_keys=True, indent=4)

    @classmethod
    def json_dump_default(cls, python_object):
        """Serialize (partially) a :class:`JSONDeSerializable` object.

        Meant to be passed as ``default`` to :func:`json.dump` or
        :func:`json.dumps`, which call ``default(python_object)`` only
        for non-basic Python types — so anything that is not a
        :class:`JSONDeSerializable` here is a genuine type error.

        """
        if not isinstance(python_object, JSONDeSerializable):
            # this branch is necessary, cannot just "return"
            raise TypeError(repr(python_object) + ' is not JSON serializable')
        return python_object.to_partial_json()
| {
"content_hash": "cfb85ec1a136b835c6fe9e213a93b176",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 79,
"avg_line_length": 35.86574074074074,
"alnum_prop": 0.6415386601265006,
"repo_name": "ghyde/letsencrypt",
"id": "f85777a30ce8b1c3714a92da428ee1deb6ac73f4",
"size": "7747",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "acme/acme/jose/interfaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14147"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1147194"
},
{
"name": "Shell",
"bytes": "20050"
}
],
"symlink_target": ""
} |
# Auto-generated scenario: run the generic regression test for an
# XGBRegressor model on the california_housing dataset against Oracle.
from sklearn2sql_heroku.tests.regression import generic as reg_gen

reg_gen.test_model("XGBRegressor" , "california_housing" , "oracle")
| {
"content_hash": "42c479052caa4f5011f91fb4e594dc24",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 68,
"avg_line_length": 34.5,
"alnum_prop": 0.7753623188405797,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "8306e86c1248b7de39bb409b8cd406c9c0bd5d20",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regression/california_housing/ws_california_housing_XGBRegressor_oracle_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class Organization(models.Model):
    """Model for organisations"""

    # Unique display name of the organisation.
    name = models.CharField(max_length=255, unique=True)
    # Free-text description; note max_length on TextField is only
    # enforced in forms, not at the database level.
    description = models.TextField(max_length=500, blank=True)
    website = models.URLField(max_length=500, blank=True)
    # Unique organisation code; nullable so multiple orgs may omit it.
    # (presumably an EBU codops identifier — confirm)
    codops = models.CharField(max_length=255, unique=True, null=True)
    logo = models.ImageField(upload_to='uploads/orgalogos', blank=True)

    def __str__(self):
        # e.g. "[CODOPS] Name"
        return """[{codops}] {name}""".format(codops=self.codops, name=self.name)
class LppUser(AbstractUser):
    """Custom user model: a Django user optionally tied to one Organization."""

    # Deleting the Organization cascades and deletes its users.
    organization = models.ForeignKey('Organization', null=True, blank=True, on_delete=models.CASCADE)

    def __str__(self):
        return self.username
| {
"content_hash": "ba43febbd9bb155c248867d1b951d24f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 101,
"avg_line_length": 35.90909090909091,
"alnum_prop": 0.7113924050632912,
"repo_name": "ebu/radiodns-plugit",
"id": "4bfe468d814ae0bbca7accd67046cd094e7c24c6",
"size": "790",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "LightweightPlugitProxy/lpp_core/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11815"
},
{
"name": "Python",
"bytes": "190516"
}
],
"symlink_target": ""
} |
"""Test MockupDB."""
import contextlib
import datetime
import os
import ssl
import sys
import tempfile
import unittest
from struct import Struct
if sys.version_info[0] < 3:
from io import BytesIO as StringIO
else:
from io import StringIO
try:
from queue import Queue
except ImportError:
from Queue import Queue
from bson import (Binary, BSON, Code, DBRef, Decimal128, MaxKey, MinKey,
ObjectId, Regex, SON, Timestamp)
from bson.codec_options import CodecOptions
from pymongo import MongoClient, message, WriteConcern
from mockupdb import (go, going, Command, CommandBase, Matcher, MockupDB,
Request, OpInsert, OP_MSG_FLAGS, OpMsg, OpQuery,
QUERY_FLAGS)
@contextlib.contextmanager
def capture_stderr():
    """Temporarily redirect ``sys.stderr`` into an in-memory buffer.

    Yields the buffer; on exit the original stderr is restored and the
    buffer is rewound to position 0 so callers can read it immediately.
    """
    buf = StringIO()
    original = sys.stderr
    sys.stderr = buf
    try:
        yield buf
    finally:
        sys.stderr = original
        buf.seek(0)
class TestGoing(unittest.TestCase):
    # Error propagation of the going() context manager.

    def test_nested_errors(self):
        # When the `with` body raises AND the background function raises,
        # the body's exception wins, the background error is reported on
        # stderr, and the future re-raises its error on every later call.
        def thrower():
            raise AssertionError("thrown")

        with capture_stderr() as stderr:
            with self.assertRaises(ZeroDivisionError):
                with going(thrower) as future:
                    1 / 0

        self.assertIn('error in going(', stderr.getvalue())
        self.assertIn('AssertionError: thrown', stderr.getvalue())

        # Future keeps raising.
        self.assertRaises(AssertionError, future)
        self.assertRaises(AssertionError, future)
class TestRequest(unittest.TestCase):
    # Parsing and repr of Request / OpQuery / Command.

    def _pack_request(self, ns, slave_ok):
        """Build an OP_QUERY on `ns`; return (body bytes, request id)."""
        flags = 4 if slave_ok else 0
        request_id, msg_bytes, max_doc_size = message.query(
            flags, ns, 0, 0, {}, None, CodecOptions())

        # Skip 16-byte standard header.
        return msg_bytes[16:], request_id

    def test_flags(self):
        # A bare Request carries no flags.
        request = Request()
        self.assertIsNone(request.flags)
        self.assertFalse(request.slave_ok)

        # Unpacked OP_QUERY on a normal namespace: flags 0, not a Command.
        msg_bytes, request_id = self._pack_request('db.collection', False)
        request = OpQuery.unpack(msg_bytes, None, None, request_id)
        self.assertIsInstance(request, OpQuery)
        self.assertNotIsInstance(request, Command)
        self.assertEqual(0, request.flags)
        self.assertFalse(request.slave_ok)
        self.assertFalse(request.slave_okay)  # Synonymous.

        # Queries on db.$cmd unpack as Commands.
        msg_bytes, request_id = self._pack_request('db.$cmd', False)
        request = OpQuery.unpack(msg_bytes, None, None, request_id)
        self.assertIsInstance(request, Command)
        self.assertEqual(0, request.flags)

        # SlaveOkay bit (4) is preserved on unpack.
        msg_bytes, request_id = self._pack_request('db.collection', True)
        request = OpQuery.unpack(msg_bytes, None, None, request_id)
        self.assertEqual(4, request.flags)
        self.assertTrue(request.slave_ok)

        msg_bytes, request_id = self._pack_request('db.$cmd', True)
        request = OpQuery.unpack(msg_bytes, None, None, request_id)
        self.assertEqual(4, request.flags)

    def test_fields(self):
        # Field projection defaults to None and is stored verbatim.
        self.assertIsNone(OpQuery({}).fields)
        self.assertEqual({'_id': False, 'a': 1},
                         OpQuery({}, fields={'_id': False, 'a': 1}).fields)

    def test_repr(self):
        self.assertEqual('Request()', repr(Request()))
        self.assertEqual('Request({})', repr(Request({})))
        self.assertEqual('Request({})', repr(Request([{}])))
        self.assertEqual('Request(flags=4)', repr(Request(flags=4)))

        self.assertEqual('OpQuery({})', repr(OpQuery()))
        self.assertEqual('OpQuery({})', repr(OpQuery({})))
        self.assertEqual('OpQuery({})', repr(OpQuery([{}])))
        # OpQuery renders known flag bits by name.
        self.assertEqual('OpQuery({}, flags=SlaveOkay)',
                         repr(OpQuery(flags=4)))
        self.assertEqual('OpQuery({}, flags=SlaveOkay)',
                         repr(OpQuery({}, flags=4)))
        self.assertEqual('OpQuery({}, flags=TailableCursor|AwaitData)',
                         repr(OpQuery({}, flags=34)))

        self.assertEqual('Command({})', repr(Command()))
        self.assertEqual('Command({"foo": 1})', repr(Command('foo')))
        # Key order is preserved in the repr.
        son = SON([('b', 1), ('a', 1), ('c', 1)])
        self.assertEqual('Command({"b": 1, "a": 1, "c": 1})',
                         repr(Command(son)))
        self.assertEqual('Command({}, flags=SlaveOkay)',
                         repr(Command(flags=4)))

        self.assertEqual('OpInsert({}, {})', repr(OpInsert([{}, {}])))
        self.assertEqual('OpInsert({}, {})', repr(OpInsert({}, {})))

    def test_assert_matches(self):
        # A request matches itself; a mismatch raises AssertionError.
        request = OpQuery({'x': 17}, flags=QUERY_FLAGS['SlaveOkay'])
        request.assert_matches(request)

        with self.assertRaises(AssertionError):
            request.assert_matches(Command('foo'))
class TestUnacknowledgedWrites(unittest.TestCase):
    # With w=0, PyMongo must send OP_MSG with the moreToCome flag (bit 2)
    # and an explicit writeConcern of {'w': 0}.

    def setUp(self):
        self.server = MockupDB(auto_ismaster=True)
        self.server.run()
        self.addCleanup(self.server.stop)
        self.client = MongoClient(self.server.uri)
        # Collection configured for unacknowledged writes.
        self.collection = self.client.db.get_collection(
            'collection', write_concern=WriteConcern(w=0))

    def test_insert_one(self):
        with going(self.collection.insert_one, {'_id': 1}):
            # The moreToCome flag = 2.
            self.server.receives(
                OpMsg('insert', 'collection', writeConcern={'w': 0}, flags=2))

    def test_insert_many(self):
        collection = self.collection.with_options(
            write_concern=WriteConcern(0))
        docs = [{'_id': 1}, {'_id': 2}]
        with going(collection.insert_many, docs, ordered=False):
            self.server.receives(OpMsg(SON([
                ('insert', 'collection'),
                ('ordered', False),
                ('writeConcern', {'w': 0})]), flags=2))

    def test_replace_one(self):
        with going(self.collection.replace_one, {}, {}):
            self.server.receives(OpMsg(SON([
                ('update', 'collection'),
                ('writeConcern', {'w': 0})
            ]), flags=2))

    def test_update_many(self):
        with going(self.collection.update_many, {}, {'$unset': 'a'}):
            self.server.receives(OpMsg(SON([
                ('update', 'collection'),
                ('ordered', True),
                ('writeConcern', {'w': 0})
            ]), flags=2))

    def test_delete_one(self):
        with going(self.collection.delete_one, {}):
            self.server.receives(OpMsg(SON([
                ('delete', 'collection'),
                ('writeConcern', {'w': 0})
            ]), flags=2))

    def test_delete_many(self):
        with going(self.collection.delete_many, {}):
            self.server.receives(OpMsg(SON([
                ('delete', 'collection'),
                ('writeConcern', {'w': 0})]), flags=2))
class TestMatcher(unittest.TestCase):
    # Matching semantics of Matcher against Command specs.

    def test_command_name_case_insensitive(self):
        self.assertTrue(
            Matcher(Command('ismaster')).matches(Command('IsMaster')))

    def test_command_first_arg(self):
        # The command's own value must match exactly.
        self.assertFalse(
            Matcher(Command(ismaster=1)).matches(Command(ismaster=2)))

    def test_command_fields(self):
        # Additional fields are compared by value.
        self.assertTrue(
            Matcher(Command('a', b=1)).matches(Command('a', b=1)))
        self.assertFalse(
            Matcher(Command('a', b=1)).matches(Command('a', b=2)))

    def test_bson_classes(self):
        # Each BSON wrapper type must compare equal to itself and to the
        # equivalent PyMongo-constructed value, both as a plain field and
        # as a non-first Command argument.
        _id = '5a918f9fa08bff9c7688d3e1'

        for a, b in [
            (Binary(b'foo'), Binary(b'foo')),
            (Code('foo'), Code('foo')),
            (Code('foo', {'x': 1}), Code('foo', {'x': 1})),
            (DBRef('coll', 1), DBRef('coll', 1)),
            (DBRef('coll', 1, 'db'), DBRef('coll', 1, 'db')),
            (Decimal128('1'), Decimal128('1')),
            (MaxKey(), MaxKey()),
            (MinKey(), MinKey()),
            (ObjectId(_id), ObjectId(_id)),
            (Regex('foo', 'i'), Regex('foo', 'i')),
            (Timestamp(1, 2), Timestamp(1, 2)),
        ]:
            # Basic case.
            self.assertTrue(
                Matcher(Command(y=b)).matches(Command(y=b)),
                "MockupDB %r doesn't equal itself" % (b,))

            # First Command argument is special, try comparing the second also.
            self.assertTrue(
                Matcher(Command('x', y=b)).matches(Command('x', y=b)),
                "MockupDB %r doesn't equal itself" % (b,))

            # In practice, users pass PyMongo classes in message specs.
            self.assertTrue(
                Matcher(Command(y=b)).matches(Command(y=a)),
                "PyMongo %r != MockupDB %r" % (a, b))

            self.assertTrue(
                Matcher(Command('x', y=b)).matches(Command('x', y=a)),
                "PyMongo %r != MockupDB %r" % (a, b))

    def test_datetime(self):
        # NOTE(review): this server is never stopped (no addCleanup) —
        # it leaks a listener for the rest of the test run.
        server = MockupDB(auto_ismaster=True)
        server.run()
        client = MongoClient(server.uri)

        # Python datetimes have microsecond precision, BSON only millisecond.
        # Ensure this datetime matches itself despite the truncation.
        dt = datetime.datetime(2018, 12, 1, 6, 6, 6, 12345)
        doc = SON([('_id', 1), ('dt', dt)])
        with going(client.db.collection.insert_one, doc):
            server.receives(
                OpMsg('insert', 'collection', documents=[doc])).ok()
class TestAutoresponds(unittest.TestCase):
    # Behavior of MockupDB.autoresponds().
    # NOTE(review): neither server here is stopped via addCleanup.

    def test_auto_dequeue(self):
        # A matching autoresponder registered after the request arrives
        # must still dequeue and answer it.
        server = MockupDB(auto_ismaster=True)
        server.run()
        client = MongoClient(server.uri)
        future = go(client.admin.command, 'ping')
        server.autoresponds('ping')  # Should dequeue the request.
        future()

    def test_autoresponds_case_insensitive(self):
        server = MockupDB(auto_ismaster=True)
        # Little M. Note this is only case-insensitive because it's a Command.
        server.autoresponds(CommandBase('fooBar'), foo='bar')
        server.run()
        response = MongoClient(server.uri).admin.command('Foobar')
        self.assertEqual('bar', response['foo'])
class TestSSL(unittest.TestCase):
    # TLS-enabled MockupDB servers.

    def test_ssl_uri(self):
        # ssl=True must be reflected in the advertised URI.
        server = MockupDB(ssl=True)
        server.run()
        self.addCleanup(server.stop)
        self.assertEqual(
            'mongodb://localhost:%d/?ssl=true' % server.port,
            server.uri)

    def test_ssl_basic(self):
        # A client can complete a command over TLS (cert checks disabled).
        server = MockupDB(ssl=True, auto_ismaster=True)
        server.run()
        self.addCleanup(server.stop)
        client = MongoClient(server.uri, ssl_cert_reqs=ssl.CERT_NONE)
        client.db.command('ismaster')
class TestMockupDB(unittest.TestCase):
    # Core MockupDB server behavior: iteration, wire versions, UDS.

    def test_iteration(self):
        # maxWireVersion 3 so the legacy insert API below is usable.
        server = MockupDB(auto_ismaster={'maxWireVersion': 3})
        server.run()
        self.addCleanup(server.stop)
        client = MongoClient(server.uri)

        def send_three_docs():
            for i in range(3):
                # Legacy (pre-3.0) PyMongo insert API.
                client.test.test.insert({'_id': i})

        with going(send_three_docs):
            j = 0

            # The "for request in server" statement is the point of this test.
            for request in server:
                self.assertTrue(request.matches({'insert': 'test',
                                                 'documents': [{'_id': j}]}))

                request.ok()
                j += 1
                if j == 3:
                    break

    def test_default_wire_version(self):
        # Without overrides the server advertises wire versions 0..6.
        server = MockupDB(auto_ismaster=True)
        server.run()
        self.addCleanup(server.stop)
        ismaster = MongoClient(server.uri).admin.command('isMaster')
        self.assertEqual(ismaster['minWireVersion'], 0)
        self.assertEqual(ismaster['maxWireVersion'], 6)

    def test_wire_version(self):
        # Explicit min/max wire versions are advertised verbatim.
        server = MockupDB(auto_ismaster=True,
                          min_wire_version=1,
                          max_wire_version=42)
        server.run()
        self.addCleanup(server.stop)
        ismaster = MongoClient(server.uri).admin.command('isMaster')
        self.assertEqual(ismaster['minWireVersion'], 1)
        self.assertEqual(ismaster['maxWireVersion'], 42)

    @unittest.skipIf(sys.platform == 'win32', 'Windows')
    def test_unix_domain_socket(self):
        # A server bound to a Unix socket reports the path as its host,
        # port 0, and removes the socket file on stop().
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix='.sock')
        tmp.close()
        server = MockupDB(auto_ismaster={'maxWireVersion': 3},
                          uds_path=tmp.name)
        server.run()

        self.assertTrue(server.uri.endswith('.sock'),
                        'Expected URI "%s" to end with ".sock"' % (server.uri,))
        self.assertEqual(server.host, tmp.name)
        self.assertEqual(server.port, 0)
        self.assertEqual(server.address, (tmp.name, 0))
        self.assertEqual(server.address_string, tmp.name)

        client = MongoClient(server.uri)
        with going(client.test.command, {'foo': 1}) as future:
            server.receives().ok()
        response = future()
        self.assertEqual(1, response['ok'])

        server.stop()
        self.assertFalse(os.path.exists(tmp.name))
class TestResponse(unittest.TestCase):
    def test_ok(self):
        # ok(3) must set the response's "ok" field to 3, not the default 1.
        server = MockupDB(auto_ismaster={'maxWireVersion': 3})
        server.run()
        self.addCleanup(server.stop)
        client = MongoClient(server.uri)
        with going(client.test.command, {'foo': 1}) as future:
            server.receives().ok(3)
        response = future()
        self.assertEqual(3, response['ok'])
class TestOpMsg(unittest.TestCase):
    # OpMsg.unpack: flag bits, document, namespace and checksum parsing.

    def setUp(self):
        self.server = MockupDB(auto_ismaster={'maxWireVersion': 6})
        self.server.run()
        self.addCleanup(self.server.stop)
        self.client = MongoClient(self.server.uri)

    def test_flags(self):
        # Build a raw OP_MSG body (flagBits + kind-0 section) for every
        # known flag and check unpack() round-trips it.
        doc = SON([('foo', 1), ('$db', 'mydb')])
        obj = BSON.encode(doc)
        for flag_name, flag_bit in OP_MSG_FLAGS.items():
            # MockupDB strips 16-byte header then calls unpack on body.
            message_body = b''.join([
                Struct('<I').pack(flag_bit),  # flagBits
                Struct('<b').pack(0),         # section kind
                obj,
            ])
            if flag_name == 'checksumPresent':
                # Trailing CRC-32C field.
                message_body += Struct('<I').pack(1234)

            op_msg = OpMsg.unpack(msg=message_body,
                                  client=None,
                                  server=None,
                                  request_id=0)
            self.assertEqual(op_msg.flags, flag_bit)
            self.assertEqual(op_msg.doc, doc)
            # Namespace comes from the $db field.
            self.assertEqual(op_msg.namespace, 'mydb')
            if flag_name == 'checksumPresent':
                self.assertEqual(op_msg.checksum, 1234)
            else:
                self.assertEqual(op_msg.checksum, None)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "fcad60068bbbe0ec5168f558b4007d61",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 80,
"avg_line_length": 36.03186274509804,
"alnum_prop": 0.5654717366165567,
"repo_name": "ajdavis/mongo-mockup-db",
"id": "e9d04e8d1f3cbfc9178fc21a0cd77c32c8c1980e",
"size": "14748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_mockupdb.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "84534"
}
],
"symlink_target": ""
} |
"""oilboy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from apps import views
# URL routes for the oilboy project.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Landing page.
    url(r'^$', views.index, name='index'),
    # Price data as JSON (served by views.price).
    url(r'^price.json$', views.price, name='price'),
]
| {
"content_hash": "c2a0af3dce58e5cd8cd5a63b912408c4",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 36.75,
"alnum_prop": 0.6893424036281179,
"repo_name": "oilboy/oilboy",
"id": "e9bd264095750dc30ff44a52e5a1907b6897bcf0",
"size": "882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1360"
},
{
"name": "HTML",
"bytes": "899"
},
{
"name": "JavaScript",
"bytes": "2796"
},
{
"name": "Python",
"bytes": "9860"
}
],
"symlink_target": ""
} |
"""
Module import for `hsdev` backend.
"""
| {
"content_hash": "b2f0a39f9bc9013537119e9543458c8e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 34,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.627906976744186,
"repo_name": "bscottm/SublimeHaskell",
"id": "96fbcaa4f836d0e89f539320b1495b39cbc67588",
"size": "43",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hsdev/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Haskell",
"bytes": "3829"
},
{
"name": "Python",
"bytes": "427950"
},
{
"name": "Shell",
"bytes": "710"
}
],
"symlink_target": ""
} |
"""Manual smoke test: open a DexReader on serial port /dev/ttyp3 and
dump whatever one read() returns between two marker lines."""
import DexReader

reader = DexReader.DexReader("/dev/ttyp3")
data = reader.read()
# Single-argument print() calls work identically under Python 2 and 3,
# unlike the original `print x` statements which were Python-2-only.
print("===DEX DATA:")
print(data)
print("=== end of data")
"content_hash": "d150b3003db240bbd71e54ac4cf99ff5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 17.25,
"alnum_prop": 0.6956521739130435,
"repo_name": "mdisibio/dex-reader",
"id": "d67d741912da10390053e228fd5caab9d3b56462",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14993"
}
],
"symlink_target": ""
} |
import unittest
import os
import shutil
import dirtools
import incremental_backups_tools as ibt
class TestIncrementalBackupstools(unittest.TestCase):
    """Fixture tests for incremental_backups_tools.

    ``setUp`` builds a source tree under ``/tmp`` plus three identical
    copies (suffixes 2-4), then mutates copy 2 so diff/patch logic has
    additions, deletions, changes and a removed subtree to detect.
    """

    def setUp(self):
        """Initialize directories for testing diff and patch."""
        base_path = '/tmp/test_incremental_backups_tools'
        self.base_path = base_path
        os.mkdir(base_path)
        with open(os.path.join(base_path, 'file1'), 'w') as f:
            f.write('contents1')
        with open(os.path.join(base_path, 'file2'), 'w') as f:
            f.write('contents2')
        with open(os.path.join(base_path, 'file3.py'), 'w') as f:
            f.write('print "ok"')
        # Empty .pyc file, matched by the *.pyc exclusion below.
        with open(os.path.join(base_path, 'file3.pyc'), 'w'):
            pass
        with open(os.path.join(base_path, '.exclude'), 'w') as f:
            f.write('excluded_dir/\n*.pyc')
        os.mkdir(os.path.join(base_path, 'excluded_dir'))
        with open(os.path.join(base_path, 'excluded_dir/excluded_file'), 'w') as f:
            f.write('excluded')
        os.mkdir(os.path.join(base_path, 'dir1'))
        os.mkdir(os.path.join(base_path, 'dir1/subdir1'))
        # Single write (the original wrote a misspelled version first and
        # immediately overwrote it; final content is identical).
        with open(os.path.join(base_path, 'dir1/subdir1/file_subdir1'), 'w') as f:
            f.write('inside subdir1')
        with open(os.path.join(base_path, 'dir1/subdir1/.project'), 'w'):
            pass
        os.mkdir(os.path.join(base_path, 'dir2'))
        with open(os.path.join(base_path, 'dir2/file_dir2'), 'w') as f:
            f.write('inside dir2')

        shutil.copytree(base_path, base_path + '2')
        shutil.copytree(base_path, base_path + '3')
        shutil.copytree(base_path, base_path + '4')

        # Mutate copy 2: add file4 and dir3/file3, delete file2,
        # change file1, remove the dir1/subdir1 subtree.
        with open(os.path.join(base_path + '2', 'file4'), 'w') as f:
            f.write('contents4')
        os.remove(os.path.join(base_path + '2', 'file2'))
        os.mkdir(os.path.join(base_path + '2', 'dir3'))
        with open(os.path.join(base_path + '2', 'dir3/file3'), 'w') as f:
            f.write('contents3')
        with open(os.path.join(base_path + '2', 'file1'), 'w') as f:
            f.write('new things')
        shutil.rmtree(os.path.join(base_path + '2', 'dir1/subdir1'))

        self.dir = dirtools.Dir(base_path)
        self.dir2 = dirtools.Dir(base_path + '2')
        self.dir3 = dirtools.Dir(base_path + '3')
        self.dir4 = dirtools.Dir(base_path + '4')

    def tearDown(self):
        """Remove every directory created by setUp."""
        # Derive paths from self.base_path instead of repeating literals.
        shutil.rmtree(self.base_path)
        for suffix in ('2', '3', '4'):
            shutil.rmtree(self.base_path + suffix)

    def testFullBackup(self):
        # Placeholder — not implemented yet.
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "f8d92a008fb1edc7d1d9efb2b1837628",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 83,
"avg_line_length": 41.859154929577464,
"alnum_prop": 0.5942126514131898,
"repo_name": "tsileo/incremental-backups-tools",
"id": "4c0f9cead6795678e440c34c7540c3536e4fe206",
"size": "2997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_incremental_backups_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17232"
}
],
"symlink_target": ""
} |
import datetime
from django.core.cache import cache
from django.db import models
from django.db.models import Sum
import commonware.log
import mkt
import mkt.constants.comm as comm
from mkt.comm.utils import create_comm_note
from mkt.site.models import ManagerBase, ModelBase
from mkt.site.utils import cache_ns_key
from mkt.tags.models import Tag
from mkt.translations.fields import save_signal, TranslatedField
from mkt.users.models import UserProfile
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import Webapp
from mkt.websites.models import Website
user_log = commonware.log.getLogger('z.users')
QUEUE_TARAKO = 'tarako'
class CannedResponse(ModelBase):
    """A pre-written, translatable review response snippet."""

    name = TranslatedField()
    response = TranslatedField(short=False)
    # Free-form grouping key (presumably used to sort/group responses in
    # the UI — confirm against consumers).
    sort_group = models.CharField(max_length=255)

    class Meta:
        db_table = 'cannedresponses'

    def __unicode__(self):
        return unicode(self.name)
models.signals.pre_save.connect(save_signal, sender=CannedResponse,
dispatch_uid='cannedresponses_translations')
class EditorSubscription(ModelBase):
    """Associates a user with a webapp (db table: editor_subscriptions).

    NOTE(review): usage is not visible in this file; the name suggests a
    reviewer subscribing to an app's review updates -- verify against the
    code that creates/consumes these rows.
    """
    user = models.ForeignKey(UserProfile)
    addon = models.ForeignKey(Webapp)
    class Meta:
        db_table = 'editor_subscriptions'
class ReviewerScore(ModelBase):
    """Points awarded to a reviewer for reviewing an app or a website.

    Aggregates (totals, recents, performance, leaderboards) are cached under
    the 'riscore' cache namespace; the whole namespace is invalidated via
    `get_key(invalidate=True)` every time new points are awarded.
    """
    user = models.ForeignKey(UserProfile, related_name='_reviewer_scores')
    addon = models.ForeignKey(Webapp, blank=True, null=True, related_name='+')
    website = models.ForeignKey(Website, blank=True, null=True,
                                related_name='+')
    score = models.SmallIntegerField()
    # For automated point rewards.
    note_key = models.SmallIntegerField(choices=mkt.REVIEWED_CHOICES.items(),
                                        default=0)
    # For manual point rewards with a note.
    note = models.CharField(max_length=255, blank=True)
    class Meta:
        db_table = 'reviewer_scores'
        ordering = ('-created',)
    @classmethod
    def get_key(cls, key=None, invalidate=False):
        """Build a namespaced cache key, or invalidate the whole namespace.

        With no `key`, only performs the (optional) namespace invalidation
        and returns None; otherwise returns 'ns:key'.
        """
        namespace = 'riscore'
        if not key:  # Assuming we're invalidating the namespace.
            cache_ns_key(namespace, invalidate)
            return
        else:
            # Using cache_ns_key so each cache val is invalidated together.
            ns_key = cache_ns_key(namespace, invalidate)
            return '%s:%s' % (ns_key, key)
    @classmethod
    def get_event(cls, addon, status, **kwargs):
        """Return the review event type constant.

        This is determined by the app type and the queue the addon is
        currently in (which is determined from the status).

        Note: We're not using addon.status because this is called after the
        status has been updated by the reviewer action.
        """
        if addon.is_packaged:
            if status in mkt.WEBAPPS_APPROVED_STATUSES:
                if addon.app_type_id == mkt.ADDON_WEBAPP_PRIVILEGED:
                    return mkt.REVIEWED_WEBAPP_PRIVILEGED_UPDATE
                else:
                    return mkt.REVIEWED_WEBAPP_UPDATE
            else:  # If it's not PUBLIC, assume it's a new submission.
                if addon.app_type_id == mkt.ADDON_WEBAPP_PRIVILEGED:
                    return mkt.REVIEWED_WEBAPP_PRIVILEGED
                else:
                    return mkt.REVIEWED_WEBAPP_PACKAGED
        else:  # It's a hosted app.
            in_rereview = kwargs.pop('in_rereview', False)
            if status in mkt.WEBAPPS_APPROVED_STATUSES and in_rereview:
                return mkt.REVIEWED_WEBAPP_REREVIEW
            else:
                return mkt.REVIEWED_WEBAPP_HOSTED
    @classmethod
    def get_extra_platform_points(cls, addon, status):
        """Gives extra points to reviews of apps that are compatible with
        multiple platforms, to reflect the extra effort involved. Only new
        submissions get extra points (for now).
        """
        # An already-approved status means this review was an update,
        # not a new submission, so no bonus.
        if status in mkt.WEBAPPS_APPROVED_STATUSES:
            return 0
        event = mkt.REVIEWED_WEBAPP_PLATFORM_EXTRA
        platform_bonus = mkt.REVIEWED_SCORES.get(event)
        devices_count = len(addon.device_types)
        if devices_count < 2:
            return 0
        else:
            # One bonus per supported device type beyond the first.
            return (devices_count - 1) * platform_bonus
    @classmethod
    def award_points(cls, user, addon, status, **kwargs):
        """Awards points to user based on an event and the queue.

        `event` is one of the `REVIEWED_` keys in constants.
        `status` is one of the `STATUS_` keys in constants.

        Returns the awarded score (falsy when the event carries no score).
        """
        event = cls.get_event(addon, status, **kwargs)
        score = mkt.REVIEWED_SCORES.get(event)
        if score:
            score += cls.get_extra_platform_points(addon, status)
            cls.objects.create(user=user, addon=addon, score=score,
                               note_key=event)
            # Drop all cached totals/leaderboards so they are recomputed.
            cls.get_key(invalidate=True)
            user_log.info(
                (u'Awarding %s points to user %s for "%s" for addon %s'
                 % (score, user, mkt.REVIEWED_CHOICES[event], addon.id))
                .encode('utf-8'))
        return score
    @classmethod
    def award_moderation_points(cls, user, addon, review_id, undo=False):
        """Awards points to user based on moderated review."""
        event = (mkt.REVIEWED_APP_REVIEW if not undo else
                 mkt.REVIEWED_APP_REVIEW_UNDO)
        score = mkt.REVIEWED_SCORES.get(event)
        cls.objects.create(user=user, addon=addon, score=score, note_key=event)
        cls.get_key(invalidate=True)
        user_log.info(
            u'Awarding %s points to user %s for "%s" for review %s' % (
                score, user, mkt.REVIEWED_CHOICES[event], review_id))
    @classmethod
    def award_additional_review_points(cls, user, addon, queue):
        """Awards points to user based on additional (Tarako) review."""
        # TODO: generalize with other additional reviews queues
        event = mkt.REVIEWED_WEBAPP_TARAKO
        score = mkt.REVIEWED_SCORES.get(event)
        cls.objects.create(user=user, addon=addon, score=score, note_key=event)
        cls.get_key(invalidate=True)
        user_log.info(
            u'Awarding %s points to user %s for "%s" for addon %s' %
            (score, user, mkt.REVIEWED_CHOICES[event], addon.id))
    @classmethod
    def award_mark_abuse_points(cls, user, addon=None, website=None):
        """Awards points to user based on reading abuse reports.

        Exactly one of `addon` or `website` should be given; if neither is,
        no points are awarded.
        """
        if addon:
            event = mkt.REVIEWED_APP_ABUSE_REPORT
        elif website:
            event = mkt.REVIEWED_WEBSITE_ABUSE_REPORT
        else:
            # Nothing to do here.
            return
        score = mkt.REVIEWED_SCORES.get(event)
        cls.objects.create(user=user, addon=addon, website=website,
                           score=score, note_key=event)
        cls.get_key(invalidate=True)
        user_log.info(
            u'Awarding %s points to user %s for "%s"' %
            (score, user, mkt.REVIEWED_CHOICES[event]))
    @classmethod
    def get_total(cls, user):
        """Returns total points by user."""
        key = cls.get_key('get_total:%s' % user.id)
        val = cache.get(key)
        if val is not None:
            return val
        # aggregate() returns a one-item dict, e.g. {'total': 42}; pull out
        # the single value (None when the user has no scores yet).
        val = (ReviewerScore.objects.filter(user=user)
               .aggregate(total=Sum('score'))
               .values())[0]
        if val is None:
            val = 0
        # Timeout None: cached until the 'riscore' namespace is invalidated.
        cache.set(key, val, None)
        return val
    @classmethod
    def get_recent(cls, user, limit=5):
        """Returns most recent ReviewerScore records.

        "Most recent first" relies on Meta.ordering = ('-created',).
        """
        key = cls.get_key('get_recent:%s' % user.id)
        val = cache.get(key)
        if val is not None:
            return val
        val = ReviewerScore.objects.filter(user=user)
        val = list(val[:limit])
        cache.set(key, val, None)
        return val
    @classmethod
    def get_performance(cls, user):
        """Returns sum of reviewer points."""
        key = cls.get_key('get_performance:%s' % user.id)
        val = cache.get(key)
        if val is not None:
            return val
        sql = """
        SELECT `reviewer_scores`.*,
            SUM(`reviewer_scores`.`score`) AS `total`
        FROM `reviewer_scores`
        LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
        WHERE `reviewer_scores`.`user_id` = %s
        ORDER BY `total` DESC
        """
        val = list(ReviewerScore.objects.raw(sql, [user.id]))
        cache.set(key, val, None)
        return val
    @classmethod
    def get_performance_since(cls, user, since):
        """
        Returns sum of reviewer points since the given datetime.
        """
        key = cls.get_key('get_performance:%s:%s' % (
            user.id, since.isoformat()))
        val = cache.get(key)
        if val is not None:
            return val
        sql = """
        SELECT `reviewer_scores`.*,
            SUM(`reviewer_scores`.`score`) AS `total`
        FROM `reviewer_scores`
        LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
        WHERE `reviewer_scores`.`user_id` = %s AND
            `reviewer_scores`.`created` >= %s
        ORDER BY `total` DESC
        """
        val = list(ReviewerScore.objects.raw(sql, [user.id, since]))
        # Time-bounded result, so cache for an hour instead of indefinitely.
        cache.set(key, val, 3600)
        return val
    @classmethod
    def _leaderboard_query(cls, since=None, types=None):
        """
        Returns common SQL to leaderboard calls.

        Rows are (user id, display name) annotated with `total`, highest
        total first, excluding staff/admin/no-incentive groups.
        """
        query = (cls.objects
                 .values_list('user__id', 'user__display_name')
                 .annotate(total=Sum('score'))
                 .exclude(user__groups__name__in=('No Reviewer Incentives',
                                                  'Staff', 'Admins'))
                 .order_by('-total'))
        if since is not None:
            query = query.filter(created__gte=since)
        if types is not None:
            query = query.filter(note_key__in=types)
        return query
    @classmethod
    def get_leaderboards(cls, user, days=7, types=None):
        """Returns leaderboards with ranking for the past given days.

        This will return a dict of 3 items::

            {'leader_top': [...],
             'leader_near': [...],
             'user_rank': (int)}

        If the user is not in the leaderboard, or if the user is in the top 5,
        'leader_near' will be an empty list and 'leader_top' will contain 5
        elements instead of the normal 3.
        """
        key = cls.get_key('get_leaderboards:%s' % user.id)
        val = cache.get(key)
        if val is not None:
            return val
        week_ago = datetime.date.today() - datetime.timedelta(days=days)
        leader_top = []
        leader_near = []
        query = cls._leaderboard_query(since=week_ago, types=types)
        scores = []
        user_rank = 0
        in_leaderboard = False
        # Ranks are 1-based positions in the score-ordered query.
        for rank, row in enumerate(query, 1):
            user_id, name, total = row
            scores.append({
                'user_id': user_id,
                'name': name,
                'rank': rank,
                'total': int(total),
            })
            if user_id == user.id:
                user_rank = rank
                in_leaderboard = True
        if not in_leaderboard:
            leader_top = scores[:5]
        else:
            if user_rank <= 5:  # User is in top 5, show top 5.
                leader_top = scores[:5]
            else:
                # Show top 3 plus the user's immediate neighbours.
                leader_top = scores[:3]
                leader_near = [scores[user_rank - 2], scores[user_rank - 1]]
                try:
                    leader_near.append(scores[user_rank])
                except IndexError:
                    pass  # User is last on the leaderboard.
        val = {
            'leader_top': leader_top,
            'leader_near': leader_near,
            'user_rank': user_rank,
        }
        cache.set(key, val, None)
        return val
    @classmethod
    def all_users_by_score(cls):
        """
        Returns reviewers ordered by highest total points first.
        """
        query = cls._leaderboard_query()
        scores = []
        for row in query:
            user_id, name, total = row
            # Find the highest REVIEWED_LEVELS entry whose point threshold
            # this total reaches (-1 means below the first level).
            user_level = len(mkt.REVIEWED_LEVELS) - 1
            for i, level in enumerate(mkt.REVIEWED_LEVELS):
                if total < level['points']:
                    user_level = i - 1
                    break
            # Only show level if it changes.
            if user_level < 0:
                level = ''
            else:
                level = mkt.REVIEWED_LEVELS[user_level]['name']
            scores.append({
                'user_id': user_id,
                'name': name,
                'total': int(total),
                'level': level,
            })
        prev = None
        # Walk from the bottom up, blanking repeated level names so each
        # level label appears only once in the final listing.
        for score in reversed(scores):
            if score['level'] == prev:
                score['level'] = ''
            else:
                prev = score['level']
        return scores
# Speed up created-range filters (get_performance_since, leaderboards).
ReviewerScore._meta.get_field('created').db_index = True
class EscalationQueue(ModelBase):
    """Queue membership: a Webapp with a row here is in the escalation queue."""
    addon = models.ForeignKey(Webapp)
    class Meta:
        db_table = 'escalation_queue'
class RereviewQueue(ModelBase):
    """Queue membership: a Webapp with a row here is flagged for re-review."""
    addon = models.ForeignKey(Webapp)
    class Meta:
        db_table = 'rereview_queue'
    @classmethod
    def flag(cls, addon, event, message=None):
        """Put `addon` in the re-review queue (idempotent via get_or_create),
        log `event`, and create a communication note, optionally carrying a
        reviewer-visible `message`."""
        cls.objects.get_or_create(addon=addon)
        version = addon.current_version or addon.latest_version
        if message:
            mkt.log(event, addon, version, details={'comments': message})
        else:
            mkt.log(event, addon, version)
        # TODO: if we ever get rid of ActivityLog for reviewer notes, replace
        # all flag calls to use the comm constant and not have to use
        # ACTION_MAP.
        create_comm_note(addon, version, None, message,
                         note_type=comm.ACTION_MAP(event))
# Speed up created-based queries on the re-review queue.
RereviewQueue._meta.get_field('created').db_index = True
def tarako_passed(review):
    """Tag the reviewed app as 'tarako' and refresh its search index."""
    app = review.app
    Tag(tag_text='tarako').save_tag(app)
    WebappIndexer.index_ids([app.pk])
def tarako_failed(review):
    """Strip the 'tarako' tag from the reviewed app and refresh its index."""
    app = review.app
    Tag(tag_text='tarako').remove_tag(app)
    WebappIndexer.index_ids([app.pk])
class AdditionalReviewManager(ManagerBase):
    """Queryset helpers for AdditionalReview rows."""
    def unreviewed(self, queue, and_approved=False, descending=False):
        """Pending reviews for `queue`, priority-review apps first, then by
        creation time (newest first when `descending`). With `and_approved`,
        restrict to apps whose status is already approved."""
        filters = {
            'queue': queue,
            'passed': None,
        }
        if and_approved:
            filters['app__status__in'] = mkt.WEBAPPS_APPROVED_STATUSES
        created_order = '-created' if descending else 'created'
        return (self.get_queryset()
                    .filter(**filters)
                    .order_by('-app__priority_review', created_order))
    def latest_for_queue(self, queue):
        """The most recently created review for `queue`, or None if empty."""
        queue_reviews = self.get_queryset().filter(queue=queue)
        try:
            return queue_reviews.latest()
        except AdditionalReview.DoesNotExist:
            return None
class AdditionalReview(ModelBase):
    """An extra, queue-specific review of an app (currently only Tarako)."""
    app = models.ForeignKey(Webapp)
    queue = models.CharField(max_length=30)
    # None while pending; True/False once the review has a verdict.
    passed = models.NullBooleanField()
    review_completed = models.DateTimeField(null=True)
    comment = models.CharField(null=True, blank=True, max_length=255)
    reviewer = models.ForeignKey('users.UserProfile', null=True, blank=True)
    objects = AdditionalReviewManager()
    class Meta:
        db_table = 'additional_review'
        unique_together = ('app', 'queue')
        get_latest_by = 'created'
    @property
    def pending(self):
        # No verdict yet.
        return self.passed is None
    @property
    def failed(self):
        return self.passed is False
    def __init__(self, *args, **kwargs):
        super(AdditionalReview, self).__init__(*args, **kwargs)
        # Imported here rather than at module level -- presumably to avoid a
        # circular import with mkt.reviewers.utils (verify); stored on the
        # instance so tests can swap it out.
        from mkt.reviewers.utils import log_reviewer_action
        self.log_reviewer_action = log_reviewer_action
    def execute_post_review_task(self):
        """
        Call the correct post-review function for the queue, log the
        reviewer action, and award review points. Raises ValueError if the
        review has no verdict yet.
        """
        # TODO: Pull this function from somewhere based on self.queue.
        if self.passed is None:
            raise ValueError('cannot execute post-review task when unreviewed')
        elif self.passed:
            tarako_passed(self)
            action = mkt.LOG.PASS_ADDITIONAL_REVIEW
        else:
            tarako_failed(self)
            action = mkt.LOG.FAIL_ADDITIONAL_REVIEW
        self.log_reviewer_action(
            self.app, self.reviewer, self.comment or '', action,
            queue=self.queue)
        ReviewerScore.award_additional_review_points(self.reviewer, self.app,
                                                     self.queue)
def cleanup_queues(sender, instance, **kwargs):
    """When a Webapp is deleted, drop any queue rows that reference it."""
    for queue_model in (RereviewQueue, EscalationQueue):
        queue_model.objects.filter(addon=instance).delete()
models.signals.post_delete.connect(cleanup_queues, sender=Webapp,
                                   dispatch_uid='queue-addon-cleanup')
def update_search_index(sender, instance, **kwargs):
    # Keep the app's search-index entry in sync with its queue membership.
    WebappIndexer.index_ids([instance.addon_id])
# Reindex whenever an app enters or leaves the re-review/escalation queues.
for model in (RereviewQueue, EscalationQueue):
    models.signals.post_save.connect(
        update_search_index, sender=model,
        dispatch_uid='%s-save-update-index' % model._meta.model_name)
    models.signals.post_delete.connect(
        update_search_index, sender=model,
        dispatch_uid='%s-delete-update-index' % model._meta.model_name)
| {
"content_hash": "00006ddc0c904c527a409f02b23ea4f7",
"timestamp": "",
"source": "github",
"line_count": 525,
"max_line_length": 79,
"avg_line_length": 33.645714285714284,
"alnum_prop": 0.5754642210144928,
"repo_name": "mudithkr/zamboni",
"id": "f2d7369d2aa24c07e054c1b7882b345ffe80a390",
"size": "17664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/reviewers/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357271"
},
{
"name": "HTML",
"bytes": "2279234"
},
{
"name": "JavaScript",
"bytes": "533454"
},
{
"name": "Makefile",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "4301380"
},
{
"name": "Shell",
"bytes": "11156"
},
{
"name": "Smarty",
"bytes": "1369"
}
],
"symlink_target": ""
} |
""" TODO: Add docstring """
import re
import pexpect
class MediaObject(object):
    """Represents an encodable object.

    Holds the input/output filenames and the probed media duration in
    whole seconds (-1 when the duration could not be determined).
    """
    def __init__(self, input_filename, output_filename):
        self.input_filename = input_filename
        self.output_filename = output_filename
        self.media_duration = self.get_media_duration()
        # INFO: All other media information could potentially be put here too

    @staticmethod
    def _parse_duration_seconds(output_lines):
        """Return the duration found in avprobe output, in whole seconds.

        Scans `output_lines` for 'Duration: HH:MM:SS.cc', rounds the
        fractional part to the nearest second, and returns -1 when no
        duration line is present (early process exit or format change).
        """
        # Non-greedy match on characters 'Duration: ' followed by
        # a number in the form 00:00:00.00
        regex_group = re.compile(".*?Duration: .*?(\\d+):(\\d+):(\\d+).(\\d+)",
                                 re.IGNORECASE | re.DOTALL)
        # Exits as soon as duration is found
        # PERF: Perform some tests to find the min number of lines
        # certain not to contain the duration, then operate on a slice
        # not containing those lines
        for line in output_lines:
            regex_match = regex_group.search(line)
            if regex_match:
                hours, minutes, seconds, fraction = (
                    int(g) for g in regex_match.groups())
                total = hours * 3600 + minutes * 60 + seconds
                # BUG FIX: the old return expression was
                #   a + b + c + 1 if cond else 0
                # which, due to conditional-expression precedence, returned 0
                # unless the condition held -- and the condition read the
                # seconds group instead of the fractional group.
                if fraction >= 50:  # Round 2-digit fraction to nearest second.
                    total += 1
                return total
        # Not found, so it's possible the process terminated early or an
        # update broke the regex. Unlikely, but we must return something.
        return -1

    def get_media_duration(self):
        """
        Spawns an avprobe process to get the media duration.

        Spawns an avprobe process, reads its output, and returns the media
        duration in whole seconds (-1 if it could not be determined).
        """
        info_process = pexpect.spawn("/usr/bin/avprobe " + self.input_filename)
        subprocess_output = info_process.readlines()
        # BUG FIX: was `info_process.close` (attribute access, not a call),
        # so the spawned process was never closed.
        info_process.close()
        return self._parse_duration_seconds(subprocess_output)
| {
"content_hash": "af9f5b0b915bd30ab2aace0a393f2c27",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 43.47826086956522,
"alnum_prop": 0.5915,
"repo_name": "thethomaseffect/travers-media-tools",
"id": "8bc713893f094a797174f4e97f59580181e33cff",
"size": "2000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traversme/encoder/media_object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24867"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Packaging metadata for the django-nose test-runner distribution.
setup(
    name='django-nose',
    version='0.1',
    description='Django test runner that uses nose.',
    # README.rst must sit next to this file when building the sdist.
    long_description=open('README.rst').read(),
    author='Jeff Balogh',
    author_email='me@jeffbalogh.org',
    url='http://github.com/jbalogh/django-nose',
    license='BSD',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=['nose'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| {
"content_hash": "07702ee5cd9b3264d7d8e016c8b8b7a6",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 71,
"avg_line_length": 30.962962962962962,
"alnum_prop": 0.6220095693779905,
"repo_name": "jsocol/django-nose",
"id": "d7345970ce67c06627e1daa264a8ede0dc3dd5f6",
"size": "836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6318"
},
{
"name": "Shell",
"bytes": "686"
}
],
"symlink_target": ""
} |
import gdb
import pwndbg
import pwndbg.gdblib.arch
import pwndbg.gdblib.memory
import pwndbg.gdblib.symbol
import pwndbg.gdblib.typeinfo
import pwndbg.glibc
import pwndbg.heap
import tests
from pwndbg.heap.ptmalloc import SymbolUnresolvableError
HEAP_MALLOC_CHUNK = tests.binaries.get("heap_malloc_chunk.out")
def generate_expected_malloc_chunk_output(chunks):
    """Build the expected `malloc_chunk` command output for each chunk type.

    `chunks` maps a chunk-type name ('allocated', 'tcache', 'fast', 'small',
    'large', 'unsorted') to the gdb.Value of that chunk. Returns a dict with
    the same keys whose values are the expected output lines (lists, so
    callers can patch headers, e.g. appending ' | NON_MAIN_ARENA').
    """
    def size_line(chunk):
        # glibc renamed the malloc_chunk fields to `mchunk_*`; older versions
        # use the bare names, so pick whichever field actually exists.
        field = (
            "mchunk_size"
            if "mchunk_size" in (f.name for f in chunk.type.fields())
            else "size"
        )
        return f"Size: 0x{int(chunk[field]):02x}"

    def chunk_lines(name, header, pointer_fields=()):
        # Common stanza: header, address, size, then any free-list pointers,
        # terminated by an empty line.
        chunk = chunks[name]
        lines = [header, f"Addr: {chunk.address}", size_line(chunk)]
        lines += [f"{field}: 0x{int(chunk[field]):02x}" for field in pointer_fields]
        lines.append("")
        return lines

    # Freed small requests land in tcachebins when the libc has tcache,
    # otherwise in fastbins.
    free_bin = "tcachebins" if pwndbg.heap.current.has_tcache else "fastbins"
    return {
        "allocated": chunk_lines("allocated", "Allocated chunk | PREV_INUSE"),
        "tcache": chunk_lines(
            "tcache", f"Free chunk ({free_bin}) | PREV_INUSE", ("fd",)
        ),
        "fast": chunk_lines(
            "fast", "Free chunk (fastbins) | PREV_INUSE", ("fd",)
        ),
        "small": chunk_lines(
            "small", "Free chunk (smallbins) | PREV_INUSE", ("fd", "bk")
        ),
        "large": chunk_lines(
            "large",
            "Free chunk (largebins) | PREV_INUSE",
            ("fd", "bk", "fd_nextsize", "bk_nextsize"),
        ),
        "unsorted": chunk_lines(
            "unsorted", "Free chunk (unsortedbin) | PREV_INUSE", ("fd", "bk")
        ),
    }
def test_malloc_chunk_command(start_binary):
    """End-to-end check of the `malloc_chunk` command output for every chunk
    type: in the main arena, from another thread, and in a non-main arena.
    Relies on the break_here stops in heap_malloc_chunk.out."""
    start_binary(HEAP_MALLOC_CHUNK)
    gdb.execute("break break_here")
    gdb.execute("continue")
    chunks = {}
    results = {}
    chunk_types = ["allocated", "tcache", "fast", "small", "large", "unsorted"]
    for name in chunk_types:
        chunks[name] = pwndbg.gdblib.memory.poi(
            pwndbg.heap.current.malloc_chunk, gdb.lookup_symbol(f"{name}_chunk")[0].value()
        )
        results[name] = gdb.execute(f"malloc_chunk {name}_chunk", to_string=True).splitlines()
    expected = generate_expected_malloc_chunk_output(chunks)
    for name in chunk_types:
        assert results[name] == expected[name]
    gdb.execute("continue")
    # Print main thread's chunk from another thread
    assert gdb.selected_thread().num == 2
    results["large"] = gdb.execute("malloc_chunk large_chunk", to_string=True).splitlines()
    expected = generate_expected_malloc_chunk_output(chunks)
    assert results["large"] == expected["large"]
    gdb.execute("continue")
    # Test some non-main-arena chunks
    for name in chunk_types:
        chunks[name] = pwndbg.gdblib.memory.poi(
            pwndbg.heap.current.malloc_chunk, gdb.lookup_symbol(f"{name}_chunk")[0].value()
        )
        results[name] = gdb.execute(f"malloc_chunk {name}_chunk", to_string=True).splitlines()
    expected = generate_expected_malloc_chunk_output(chunks)
    # Chunks outside the main arena carry an extra NON_MAIN_ARENA flag.
    expected["allocated"][0] += " | NON_MAIN_ARENA"
    expected["tcache"][0] += " | NON_MAIN_ARENA"
    expected["fast"][0] += " | NON_MAIN_ARENA"
    for name in chunk_types:
        assert results[name] == expected[name]
    # Print another thread's chunk from the main thread
    gdb.execute("thread 1")
    assert gdb.selected_thread().num == 1
    results["large"] = gdb.execute("malloc_chunk large_chunk", to_string=True).splitlines()
    assert results["large"] == expected["large"]
def test_malloc_chunk_command_heuristic(start_binary):
    """Same scenario as test_malloc_chunk_command, but with the
    resolve-heap-via-heuristic mode enabled (chunks are read through
    pwndbg's own malloc_chunk wrapper instead of debug-symbol types)."""
    start_binary(HEAP_MALLOC_CHUNK)
    gdb.execute("set resolve-heap-via-heuristic on")
    gdb.execute("break break_here")
    gdb.execute("continue")
    chunks = {}
    results = {}
    chunk_types = ["allocated", "tcache", "fast", "small", "large", "unsorted"]
    for name in chunk_types:
        chunks[name] = pwndbg.heap.current.malloc_chunk(
            gdb.lookup_symbol(f"{name}_chunk")[0].value()
        )
        results[name] = gdb.execute(f"malloc_chunk {name}_chunk", to_string=True).splitlines()
    expected = generate_expected_malloc_chunk_output(chunks)
    for name in chunk_types:
        assert results[name] == expected[name]
    gdb.execute("continue")
    # Print main thread's chunk from another thread
    assert gdb.selected_thread().num == 2
    results["large"] = gdb.execute("malloc_chunk large_chunk", to_string=True).splitlines()
    expected = generate_expected_malloc_chunk_output(chunks)
    assert results["large"] == expected["large"]
    gdb.execute("continue")
    # Test some non-main-arena chunks
    for name in chunk_types:
        chunks[name] = pwndbg.heap.current.malloc_chunk(
            gdb.lookup_symbol(f"{name}_chunk")[0].value()
        )
        results[name] = gdb.execute(f"malloc_chunk {name}_chunk", to_string=True).splitlines()
    expected = generate_expected_malloc_chunk_output(chunks)
    # Chunks outside the main arena carry an extra NON_MAIN_ARENA flag.
    expected["allocated"][0] += " | NON_MAIN_ARENA"
    expected["tcache"][0] += " | NON_MAIN_ARENA"
    expected["fast"][0] += " | NON_MAIN_ARENA"
    for name in chunk_types:
        assert results[name] == expected[name]
    # Print another thread's chunk from the main thread
    gdb.execute("thread 1")
    assert gdb.selected_thread().num == 1
    results["large"] = gdb.execute("malloc_chunk large_chunk", to_string=True).splitlines()
    assert results["large"] == expected["large"]
class mock_for_heuristic:
    """Context manager that temporarily hides debug symbols from pwndbg.

    While active, `pwndbg.gdblib.symbol.address` and
    `pwndbg.gdblib.symbol.static_linkage_symbol_address` return None for the
    given symbols (or for everything with `mock_all`), forcing the heap
    heuristics to be exercised. With `mess_up_memory`, the page the symbols
    live in is clobbered with 0xff so the heuristics cannot succeed by
    parsing memory either. Everything is restored on exit.
    """
    def __init__(self, mock_symbols=None, mock_all=False, mess_up_memory=False):
        # BUG FIX: was `mock_symbols=[]` -- a mutable default argument.
        # Every symbol's address in the list will be mocked to `None`.
        self.mock_symbols = [] if mock_symbols is None else mock_symbols
        self.mock_all = mock_all  # all symbols will be mocked to `None`
        # Save `pwndbg.gdblib.symbol.address` and
        # `pwndbg.gdblib.symbol.static_linkage_symbol_address` before mocking
        self.saved_address_func = pwndbg.gdblib.symbol.address
        self.saved_static_linkage_symbol_address_func = (
            pwndbg.gdblib.symbol.static_linkage_symbol_address
        )
        # We mess up the memory in the page of the symbols, to make sure that
        # the heuristic will not succeed by parsing the memory
        self.mess_up_memory = mess_up_memory
        if mess_up_memory:
            # Save all the memory before we mess it up
            self.page = pwndbg.heap.current.possible_page_of_symbols
            self.saved_memory = pwndbg.gdblib.memory.read(self.page.vaddr, self.page.memsz)
    def __enter__(self):
        def mock(original):
            # Wrap a resolver so that mocked symbols resolve to None and
            # everything else falls through to the original function.
            def _mock(symbol, *args, **kwargs):
                if self.mock_all or symbol in self.mock_symbols:
                    return None
                return original(symbol, *args, **kwargs)
            return _mock
        # Mock `pwndbg.gdblib.symbol.address` and
        # `pwndbg.gdblib.symbol.static_linkage_symbol_address`
        pwndbg.gdblib.symbol.address = mock(pwndbg.gdblib.symbol.address)
        pwndbg.gdblib.symbol.static_linkage_symbol_address = mock(
            pwndbg.gdblib.symbol.static_linkage_symbol_address
        )
        if self.mess_up_memory:
            # Fill the page with `0xff`
            pwndbg.gdblib.memory.write(self.page.vaddr, b"\xff" * self.page.memsz)
    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the original resolvers and any clobbered memory.
        pwndbg.gdblib.symbol.address = self.saved_address_func
        pwndbg.gdblib.symbol.static_linkage_symbol_address = (
            self.saved_static_linkage_symbol_address_func
        )
        if self.mess_up_memory:
            # Restore the memory
            pwndbg.gdblib.memory.write(self.page.vaddr, self.saved_memory)
def test_main_arena_heuristic(start_binary):
    """Check `main_arena` resolution at every heuristic fallback level:
    debug symbols, disassembly parsing, and raw memory parsing."""
    start_binary(HEAP_MALLOC_CHUNK)
    gdb.execute("set resolve-heap-via-heuristic on")
    gdb.execute("break break_here")
    gdb.execute("continue")
    # Use the debug symbol to get the address of `main_arena`
    main_arena_addr_via_debug_symbol = pwndbg.gdblib.symbol.static_linkage_symbol_address(
        "main_arena"
    ) or pwndbg.gdblib.symbol.address("main_arena")
    # Level 1: We check we can get the address of `main_arena` from debug symbols and the struct of `main_arena` is correct
    assert pwndbg.heap.current.main_arena is not None
    # Check the address of `main_arena` is correct
    assert pwndbg.heap.current.main_arena.address == main_arena_addr_via_debug_symbol
    # Check the struct size is correct
    assert (
        pwndbg.heap.current.main_arena._gdbValue.type.sizeof
        == pwndbg.gdblib.typeinfo.lookup_types("struct malloc_state").sizeof
    )
    pwndbg.heap.current = type(pwndbg.heap.current)()  # Reset the heap object of pwndbg
    # Level 2.1: We check we can get the address of `main_arena` by parsing the assembly code of `malloc_trim`
    with mock_for_heuristic(["main_arena"], mess_up_memory=True):
        assert pwndbg.heap.current.main_arena is not None
        # Check the address of `main_arena` is correct
        assert pwndbg.heap.current.main_arena.address == main_arena_addr_via_debug_symbol
    pwndbg.heap.current = type(pwndbg.heap.current)()  # Reset the heap object of pwndbg
    # Level 2.2: No `__malloc_hook` this time, because it's possible to find `main_arena` by some magic about it
    with mock_for_heuristic(["main_arena", "__malloc_hook"], mess_up_memory=True):
        assert pwndbg.heap.current.main_arena is not None
        # Check the address of `main_arena` is correct
        assert pwndbg.heap.current.main_arena.address == main_arena_addr_via_debug_symbol
    pwndbg.heap.current = type(pwndbg.heap.current)()  # Reset the heap object of pwndbg
    # Level 3: We check we can get the address of `main_arena` by parsing the memory
    with mock_for_heuristic(mock_all=True):
        # Check the address of `main_arena` is correct
        assert pwndbg.heap.current.main_arena.address == main_arena_addr_via_debug_symbol
def test_mp_heuristic(start_binary):
    """Check `mp_` (malloc_par) resolution at every heuristic fallback level:
    debug symbols, disassembly parsing, and raw memory parsing."""
    start_binary(HEAP_MALLOC_CHUNK)
    gdb.execute("set resolve-heap-via-heuristic on")
    gdb.execute("break break_here")
    gdb.execute("continue")
    # Use the debug symbol to get the address of `mp_`
    mp_addr_via_debug_symbol = pwndbg.gdblib.symbol.static_linkage_symbol_address(
        "mp_"
    ) or pwndbg.gdblib.symbol.address("mp_")
    # Level 1: We check we can get the address of `mp_` from debug symbols and the struct of `mp_` is correct
    assert pwndbg.heap.current.mp is not None
    # Check the address of `mp_` is correct
    assert pwndbg.heap.current.mp.address == mp_addr_via_debug_symbol
    # Check the struct size is correct
    assert (
        pwndbg.heap.current.mp.type.sizeof
        == pwndbg.gdblib.typeinfo.lookup_types("struct malloc_par").sizeof
    )
    pwndbg.heap.current = type(pwndbg.heap.current)()  # Reset the heap object of pwndbg
    # Level 2: We check we can get the address of `mp_` by parsing the assembly code of `__libc_free`
    with mock_for_heuristic(["mp_"], mess_up_memory=True):
        assert pwndbg.heap.current.mp is not None
        # Check the address of `mp_` is correct
        assert pwndbg.heap.current.mp.address == mp_addr_via_debug_symbol
    pwndbg.heap.current = type(pwndbg.heap.current)()  # Reset the heap object of pwndbg
    # Level 3: We check we can get the address of `mp_` by parsing the memory
    with mock_for_heuristic(mock_all=True):
        # Check the address of `mp_` is correct
        assert pwndbg.heap.current.mp.address == mp_addr_via_debug_symbol
def test_global_max_fast_heuristic(start_binary):
    """Check `global_max_fast` resolution via debug symbols and via the
    disassembly-parsing heuristic."""
    # TODO: Support other architectures or different libc versions
    start_binary(HEAP_MALLOC_CHUNK)
    gdb.execute("set resolve-heap-via-heuristic on")
    gdb.execute("break break_here")
    gdb.execute("continue")
    # Use the debug symbol to find the address of `global_max_fast`
    global_max_fast_addr_via_debug_symbol = pwndbg.gdblib.symbol.static_linkage_symbol_address(
        "global_max_fast"
    ) or pwndbg.gdblib.symbol.address("global_max_fast")
    assert global_max_fast_addr_via_debug_symbol is not None
    # Level 1: We check we can get the address of `global_max_fast` from debug symbols and the value of `global_max_fast` is correct
    assert pwndbg.heap.current.global_max_fast is not None
    # Check the address of `global_max_fast` is correct
    assert pwndbg.heap.current._global_max_fast_addr == global_max_fast_addr_via_debug_symbol
    pwndbg.heap.current = type(pwndbg.heap.current)()  # Reset the heap object of pwndbg
    # Level 2: We check we can get the address of `global_max_fast` by parsing the assembly code of `__libc_free`
    # Mock the address of `global_max_fast` to None
    with mock_for_heuristic(["global_max_fast"]):
        # Use heuristic to find `global_max_fast`
        assert pwndbg.heap.current.global_max_fast is not None
        # Check the address of `global_max_fast` is correct
        assert pwndbg.heap.current._global_max_fast_addr == global_max_fast_addr_via_debug_symbol
def test_thread_cache_heuristic(start_binary):
    """Check `thread_cache` resolution via debug symbols and via the
    first-chunk fallback heuristic."""
    # TODO: Support other architectures or different libc versions
    start_binary(HEAP_MALLOC_CHUNK)
    gdb.execute("set resolve-heap-via-heuristic on")
    gdb.execute("break break_here")
    gdb.execute("continue")
    # Use the debug symbol to find the address of `thread_cache`
    tcache_addr_via_debug_symbol = pwndbg.gdblib.symbol.static_linkage_symbol_address(
        "tcache"
    ) or pwndbg.gdblib.symbol.address("tcache")
    # `tcache` is a pointer; dereference it to get the thread_cache address.
    thread_cache_addr_via_debug_symbol = pwndbg.gdblib.memory.u(tcache_addr_via_debug_symbol)
    # Level 1: We check we can get the address of `thread_cache` from debug symbols and the struct of `thread_cache` is correct
    assert pwndbg.heap.current.thread_cache is not None
    # Check the address of `thread_cache` is correct
    assert pwndbg.heap.current.thread_cache.address == thread_cache_addr_via_debug_symbol
    # Check the struct size is correct
    assert (
        pwndbg.heap.current.thread_cache.type.sizeof
        == pwndbg.gdblib.typeinfo.lookup_types("struct tcache_perthread_struct").sizeof
    )
    pwndbg.heap.current = type(pwndbg.heap.current)()  # Reset the heap object of pwndbg
    # Level 2: We check we can get the address of `thread_cache` by parsing the assembly code of `__libc_malloc`
    # TODO: Find a good way to check we scuessfully get the address of `thread_cache` by parsing the assembly code instead of using the first chunk of `thread_cache`
    # Note: This only useful when we can NOT find the heap boundaries and the the arena is been shared, it should not be a big problem in most of the cases
    # Level 3: We check we can get the address of `thread_cache` by using the first chunk
    # Note: This will NOT work when can NOT find the heap boundaries or the the arena is been shared
    with mock_for_heuristic(["tcache", "__libc_malloc"]):
        # Check the address of `thread_cache` is correct
        assert pwndbg.heap.current.thread_cache.address == thread_cache_addr_via_debug_symbol
def test_thread_arena_heuristic(start_binary):
    """Verify the `thread_arena` heuristic against the libc debug symbols."""
    # TODO: Support other architectures or different libc versions
    start_binary(HEAP_MALLOC_CHUNK)
    gdb.execute("set resolve-heap-via-heuristic on")
    gdb.execute("break break_here")
    gdb.execute("continue")
    # Use the debug symbol to find the value of `thread_arena`
    thread_arena_via_debug_symbol = pwndbg.gdblib.symbol.static_linkage_symbol_address(
        "thread_arena"
    ) or pwndbg.gdblib.symbol.address("thread_arena")
    assert thread_arena_via_debug_symbol is not None
    # `thread_arena` holds a pointer to the arena; read the pointed-to value.
    thread_arena_via_debug_symbol = pwndbg.gdblib.memory.u(thread_arena_via_debug_symbol)
    assert thread_arena_via_debug_symbol > 0
    # Level 1: We check we can get the address of `thread_arena` from debug symbols and the value of `thread_arena` is correct
    assert pwndbg.heap.current.thread_arena is not None
    # Check the address of `thread_arena` is correct
    assert pwndbg.heap.current.thread_arena.address == thread_arena_via_debug_symbol
    pwndbg.heap.current = type(pwndbg.heap.current)()  # Reset the heap object of pwndbg
    # Level 2: We check we can get the address of `thread_arena` by parsing the assembly code of `__libc_calloc`
    # Mock the address of `thread_arena` to None
    with mock_for_heuristic(["thread_arena"]):
        assert pwndbg.gdblib.symbol.address("thread_arena") is None
        # Check the value of `thread_arena` is correct
        assert pwndbg.heap.current.thread_arena.address == thread_arena_via_debug_symbol
def test_heuristic_fail_gracefully(start_binary):
    """Every heuristic must raise SymbolUnresolvableError when all symbols are mocked away."""
    # TODO: Support other architectures or different libc versions
    start_binary(HEAP_MALLOC_CHUNK)
    gdb.execute("set resolve-heap-via-heuristic on")
    gdb.execute("break break_here")
    gdb.execute("continue")
    def _test_heuristic_fail_gracefully(name):
        # Accessing the property must raise SymbolUnresolvableError; any other
        # outcome (no exception, or a different exception type) fails the test.
        try:
            getattr(pwndbg.heap.current, name)
            raise AssertionError(
                "The heuristic for pwndbg.heap.current.%s should fail with SymbolUnresolvableError"
                % name
            )
        except SymbolUnresolvableError:
            # That's the only exception we expect
            pass
    # Mock all address and mess up the memory
    with mock_for_heuristic(mock_all=True, mess_up_memory=True):
        _test_heuristic_fail_gracefully("main_arena")
        _test_heuristic_fail_gracefully("mp")
        _test_heuristic_fail_gracefully("global_max_fast")
        _test_heuristic_fail_gracefully("thread_cache")
        _test_heuristic_fail_gracefully("thread_arena")
| {
"content_hash": "6b9c1c1e07e1be36da9b72882dbe76b7",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 165,
"avg_line_length": 44.97374701670645,
"alnum_prop": 0.664243260454256,
"repo_name": "pwndbg/pwndbg",
"id": "0e1b90d083d0bc429114a46284a071586f315a54",
"size": "18844",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/gdb-tests/tests/heap/test_heap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1149"
},
{
"name": "C",
"bytes": "16361"
},
{
"name": "Dockerfile",
"bytes": "1762"
},
{
"name": "Go",
"bytes": "58"
},
{
"name": "Makefile",
"bytes": "2818"
},
{
"name": "Python",
"bytes": "2472150"
},
{
"name": "Shell",
"bytes": "17094"
}
],
"symlink_target": ""
} |
from flask import g, jsonify
from flask_httpauth import HTTPBasicAuth
from . import api
from ..models import User, AnonymousUser
# Shared HTTP Basic Auth handler for this API blueprint.
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(email_or_token, password):
    """Authenticate by email+password, by token, or as an anonymous user.

    Side effects: sets ``g.current_user`` and (for non-anonymous requests)
    ``g.token_used``, which ``get_token`` inspects later.
    """
    # Empty credentials: allow the request through as anonymous.
    if email_or_token == '':
        g.current_user = AnonymousUser()
        return True
    # Empty password: the "username" field carries an auth token instead.
    if password == '':
        g.current_user = User.verify_auth_token(email_or_token)
        g.token_used = True
        return g.current_user is not None
    # Otherwise: regular email + password authentication.
    user = User.query.filter_by(email=email_or_token).first()
    if not user:
        return False
    g.current_user = user
    g.token_used = False
    return user.verify_password(password)
@api.before_request
def before_request():
    """Per-request hook; intentionally a no-op placeholder in this example."""
    pass
@api.route('/token/', methods=['POST', 'GET'])
@auth.login_required
def get_token():
    """Issue a fresh auth token valid for one hour.

    Anonymous users and clients that authenticated with a token (rather
    than a password) are rejected, so tokens cannot renew themselves.
    """
    if isinstance(g.current_user, AnonymousUser) or g.token_used:
        # NOTE(review): `unauthorized` is not imported in this module —
        # presumably `from .errors import unauthorized` is missing; confirm.
        return unauthorized('Invalid credentials')
    return jsonify({
        'token': g.current_user.generate_auth_token(3600),
        'expiration': 3600
    })
| {
"content_hash": "9bb7eb5c0bb7a8f3d5824c18674fb504",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 65,
"avg_line_length": 23.636363636363637,
"alnum_prop": 0.6605769230769231,
"repo_name": "neo1218/rest",
"id": "4f9cd805b0a7902e26be5c00a9f5a43a6d861243",
"size": "1057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/HelloAPI/app/api/authentication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10652"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import sys
from matplotlib.ticker import FormatStrFormatter
# Command-line arguments: project name, statistics file to parse, output dir.
projectName=str(sys.argv[1])
inputFile=str(sys.argv[2])
outPrefix=str(sys.argv[3])+"/"+projectName
# Time series collected from the input file (one entry per status report).
time = []
coverage = []
execution = []
memory = []
finished = []
remaining = []
depth = []
states = []
distinctStates = []
repetitionRatio = []
# Axis limits for the plots; recomputed by read_input() after parsing.
timeLimit=0
coverageLimit=100
executionLimit=0
memoryLimit=0
finishedLimit=0
remainingLimit=0
depthLimit=0
statesLimit=0
distinctStatesLimit=0
repetitionRatioLimit=1
# True when reports are per-step ("Status after ... Step") rather than per-time.
stepStatus = False
def read_input():
    """Parse the statistics log into the module-level series and axis limits.

    Each status block in the log is expected to be 10 consecutive lines
    (Status after / Coverage / Iterations / Memory / Finished / Remaining /
    Depth / States / DistinctStates); incomplete blocks are skipped.
    """
    global stepStatus
    global time, coverage, execution, memory, finished, remaining, depth, states, distinctStates, repetitionRatio
    global timeLimit, coverageLimit, executionLimit, memoryLimit, finishedLimit, remainingLimit, depthLimit, statesLimit, distinctStatesLimit, repetitionRatioLimit
    # Seed each series with an origin point so every curve starts at zero.
    time.append(0)
    coverage.append(0)
    execution.append(0)
    memory.append(0)
    finished.append(0)
    remaining.append(0)
    depth.append(0)
    states.append(0)
    distinctStates.append(0)
    repetitionRatio.append(1)
    # NOTE(review): the file handle is never closed; consider a with-block.
    f = open(inputFile, 'r')
    lines = f.readlines()
    i = 0
    # A full status block spans 10 lines; stop when fewer than that remain.
    while i < len(lines)-9:
        # -1 marks "field not found yet" for each expected value.
        tVal = -1
        cVal = -1
        eVal = -1
        mVal = -1
        fVal = -1
        rVal = -1
        dVal = -1
        sVal = -1
        dsVal = -1
        line = lines[i].strip()
        i += 1
        if ("Statistics Report" in line):
            break
        # Each field must appear on the expected line; otherwise restart the
        # scan from the next line (`continue`).
        if ("Status after" in line):
            if ("Step" in line):
                stepStatus = True
            tVal = line.split()[-2]
            line = lines[i].strip()
            i += 1
        else:
            continue
        if ("Coverage:" in line):
            cVal = line.split()[1]
            line = lines[i].strip()
            i += 1
        else:
            continue
        if ("Iterations:" in line):
            eVal = line.split()[1]
            line = lines[i].strip()
            i += 1
        else:
            continue
        if ("Memory:" in line):
            mVal = line.split()[1]
            line = lines[i].strip()
            i += 1
        else:
            continue
        if ("Finished:" in line):
            fVal = line.split()[1]
            line = lines[i].strip()
            i += 1
        else:
            continue
        if ("Remaining:" in line):
            rVal = line.split()[1]
            line = lines[i].strip()
            i += 1
        else:
            continue
        if ("Depth:" in line):
            dVal = line.split()[1]
            line = lines[i].strip()
            i += 1
        else:
            continue
        if ("States:" in line):
            sVal = line.split()[1]
            line = lines[i].strip()
            i += 1
        else:
            continue
        if ("DistinctStates:" in line):
            dsVal = line.split()[1]
            line = lines[i].strip()
            i += 1
        else:
            continue
        # The continues above guarantee every field was assigned by now.
        assert(tVal != -1)
        assert(cVal != -1)
        assert(eVal != -1)
        assert(mVal != -1)
        assert(fVal != -1)
        assert(rVal != -1)
        assert(dVal != -1)
        assert(sVal != -1)
        assert(dsVal != -1)
        time.append(float(tVal))
        coverage.append(float(cVal))
        execution.append(int(eVal))
        memory.append(float(mVal))
        finished.append(int(fVal))
        remaining.append(int(rVal))
        depth.append(float(dVal))
        states.append(float(sVal))
        distinctStates.append(float(dsVal))
        # Guard against division by zero when no distinct states were seen.
        if (float(dsVal) != 0):
            repetitionRatio.append(float(sVal) / float(dsVal))
        else:
            repetitionRatio.append(1)
    # Derive the axis limits from the parsed data.
    timeLimit = max(time)
    coverageLimit = min(100, max(coverage))
    # Snap to a full 0-100 scale once coverage exceeds a token amount.
    if (coverageLimit > 5):
        coverageLimit = 100
    executionLimit = max(execution)
    memoryLimit = max(memory)
    finishedLimit = max(finished)
    remainingLimit = max(remaining)
    depthLimit = max(depth)
    statesLimit = max(states)
    distinctStatesLimit = max(distinctStates)
    repetitionRatioLimit = max(repetitionRatio)
    # Never let a limit be zero; the plots need a positive axis range.
    if (timeLimit == 0):
        timeLimit = 1
    if (coverageLimit == 0):
        coverageLimit = 1
    if (executionLimit == 0):
        executionLimit = 1
    if (memoryLimit == 0):
        memoryLimit = 1
    if (finishedLimit == 0):
        finishedLimit = 1
    if (remainingLimit == 0):
        remainingLimit = 1
    if (depthLimit == 0):
        depthLimit = 1
    if (statesLimit == 0):
        statesLimit = 1
    if (distinctStatesLimit == 0):
        distinctStatesLimit = 1
    if (repetitionRatioLimit == 0):
        repetitionRatioLimit = 1
def set_plot(ax, x, y):
    """Annotate the current figure with the maxima of the plotted series.

    The axes object is accepted for call-site symmetry but the labels are
    placed on the figure itself via ``plt.figtext``.
    """
    xLast = max(x)
    yLast = max(y)
    # Bug fix: each annotation used to be guarded by the *other* series'
    # maximum (y-max was only printed when xLast != 0 and vice versa).
    if (yLast != 0):
        plt.figtext(0.01, 1.001, "y-max: "+str(yLast), ha="left")
    if (xLast != 0):
        plt.figtext(1.001, 0.01, "x-max: "+str(xLast), ha="right")
def coverage_vs_time():
    """Plot coverage (%) against time and save <prefix>_coverage.png."""
    fig, ax = plt.subplots()
    ax.plot(time, coverage, color="blue", alpha=0.5, clip_on=False)
    # Coverage is a percentage, so the y-axis is fixed to the full 0-100 range.
    ax.set_ylim(0, 100)
    ax.set_xlim(0, timeLimit)
    set_plot(ax, time, coverage)
    ax.set(xlabel='Time (s)', ylabel='Coverage (%)', title=projectName+": Coverage vs Time")
    ax.grid()
    fig.savefig(outPrefix+"_coverage.png", bbox_inches = "tight")
def coverage_vs_executions():
    """Plot coverage (%) against execution count and save the chart."""
    fig, ax = plt.subplots()
    ax.plot(execution, coverage, color="green", alpha=0.5, clip_on=False)
    ax.set_ylim(0, coverageLimit)
    ax.set_xlim(0, executionLimit)
    # Execution counts are integers; force plain integer tick labels.
    ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
    set_plot(ax, execution, coverage)
    ax.set(xlabel='#Executions', ylabel='Coverage (%)', title=projectName+": Coverage vs #Executions")
    ax.grid()
    fig.savefig(outPrefix+"_coverage_vs_executions.png", bbox_inches = "tight")
def executions_vs_time():
    """Plot execution count against time and save the chart."""
    fig, ax = plt.subplots()
    ax.plot(time, execution, color="magenta", alpha=0.5, clip_on=False)
    ax.set_ylim(0, executionLimit)
    ax.set_xlim(0, timeLimit)
    # Execution counts are integers; force plain integer tick labels.
    ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
    set_plot(ax, time, execution)
    ax.set(xlabel='Time (s)', ylabel='#Executions', title=projectName+": #Executions vs Time")
    ax.grid()
    fig.savefig(outPrefix+"_executions.png", bbox_inches = "tight")
def memory_vs_time():
    """Plot memory usage (MB) against time and save the chart."""
    fig, ax = plt.subplots()
    ax.plot(time, memory, color="olive", alpha=0.5, clip_on=False)
    ax.set_ylim(0, memoryLimit)
    ax.set_xlim(0, timeLimit)
    ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
    set_plot(ax, time, memory)
    ax.set(xlabel='Time (s)', ylabel='Memory (MB)', title=projectName+": Memory (MB) vs Time")
    ax.grid()
    fig.savefig(outPrefix+"_memory.png", bbox_inches = "tight")
def finished_vs_time():
    """Plot the number of finished tasks against time and save the chart."""
    fig, ax = plt.subplots()
    ax.plot(time, finished, color="olive", alpha=0.5, clip_on=False)
    ax.set_ylim(0, finishedLimit)
    ax.set_xlim(0, timeLimit)
    ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
    set_plot(ax, time, finished)
    ax.set(xlabel='Time (s)', ylabel='#Finished Tasks', title=projectName+": #Finished Tasks vs Time")
    ax.grid()
    fig.savefig(outPrefix+"_finished.png", bbox_inches = "tight")
def remaining_vs_time():
    """Plot the number of remaining tasks against time and save the chart."""
    fig, ax = plt.subplots()
    ax.plot(time, remaining, color="darkorange", alpha=0.5, clip_on=False)
    ax.set_ylim(0, remainingLimit)
    ax.set_xlim(0, timeLimit)
    ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
    set_plot(ax, time, remaining)
    ax.set(xlabel='Time (s)', ylabel='#Remaining Tasks', title=projectName+": #Remaining Tasks vs Time")
    ax.grid()
    fig.savefig(outPrefix+"_remaining.png", bbox_inches = "tight")
def depth_vs_time():
    """Plot exploration depth against time (inverted y-axis) and save the chart."""
    fig, ax = plt.subplots()
    ax.plot(time, depth, color="olive", alpha=0.5, clip_on=False)
    ax.set_ylim(0, depthLimit)
    ax.set_xlim(0, timeLimit)
    ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
    set_plot(ax, time, depth)
    # Depth grows downward, so flip the y-axis to draw it top-to-bottom.
    plt.gca().invert_yaxis()
    ax.set(xlabel='Time (s)', ylabel='Depth', title=projectName+": Depth vs Time")
    ax.grid()
    fig.savefig(outPrefix+"_depth.png", bbox_inches = "tight")
def states_vs_time():
    """Plot the number of states against time and save the chart."""
    fig, ax = plt.subplots()
    ax.plot(time, states, color="olive", alpha=0.5, clip_on=False)
    ax.set_ylim(0, statesLimit)
    ax.set_xlim(0, timeLimit)
    ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
    set_plot(ax, time, states)
    ax.set(xlabel='Time (s)', ylabel='#States', title=projectName+": #States vs Time")
    ax.grid()
    fig.savefig(outPrefix+"_states.png", bbox_inches = "tight")
def distinct_states_vs_time():
    """Plot the number of distinct states against time and save the chart."""
    fig, ax = plt.subplots()
    ax.plot(time, distinctStates, color="olive", alpha=0.5, clip_on=False)
    ax.set_ylim(0, distinctStatesLimit)
    ax.set_xlim(0, timeLimit)
    ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
    set_plot(ax, time, distinctStates)
    ax.set(xlabel='Time (s)', ylabel='#DistinctStates', title=projectName+": #DistinctStates vs Time")
    ax.grid()
    fig.savefig(outPrefix+"_distinct_states.png", bbox_inches = "tight")
def repetition_vs_time():
    """Plot the state repetition ratio (#States / #DistinctStates) over time."""
    fig, ax = plt.subplots()
    ax.plot(time, repetitionRatio, color="olive", alpha=0.5, clip_on=False)
    ax.set_ylim(0, repetitionRatioLimit)
    ax.set_xlim(0, timeLimit)
    # The ratio is fractional; show two decimal places on the ticks.
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
    set_plot(ax, time, repetitionRatio)
    ax.set(xlabel='Time (s)', ylabel='#States / #DistinctStates', title=projectName+": Repetition vs Time")
    ax.grid()
    fig.savefig(outPrefix+"_repetition.png", bbox_inches = "tight")
# Parse the statistics log once, then render every chart.
read_input()
coverage_vs_time()
coverage_vs_executions()
executions_vs_time()
memory_vs_time()
finished_vs_time()
remaining_vs_time()
depth_vs_time()
states_vs_time()
distinct_states_vs_time()
repetition_vs_time()
| {
"content_hash": "b745075e5d756ce4049db2aa0ad77b77",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 163,
"avg_line_length": 29.451515151515153,
"alnum_prop": 0.5979010186233151,
"repo_name": "p-org/P",
"id": "fb43a54394b94fe13ccb7c96e8a83dcc7946aa54",
"size": "9719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Src/PRuntimes/PSymbolicRuntime/scripts/psym_plots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "13367"
},
{
"name": "AspectJ",
"bytes": "5551"
},
{
"name": "Batchfile",
"bytes": "757"
},
{
"name": "C",
"bytes": "281876"
},
{
"name": "C#",
"bytes": "1149905"
},
{
"name": "CMake",
"bytes": "5202"
},
{
"name": "Java",
"bytes": "1079901"
},
{
"name": "OpenEdge ABL",
"bytes": "1008631"
},
{
"name": "PowerShell",
"bytes": "637"
},
{
"name": "Python",
"bytes": "40074"
},
{
"name": "Shell",
"bytes": "8088"
}
],
"symlink_target": ""
} |
from io import BytesIO
from unittest import main
from uuid import uuid4
import os
import time
import shutil
import re
from swiftclient import client
from swift.obj.diskfile import get_data_dir
from test.probe.common import ReplProbeTest
from swift.common.request_helpers import get_reserved_name
from swift.common.utils import readconf
# Bookkeeping files maintained by replication; ignored when comparing nodes.
EXCLUDE_FILES = re.compile(r'^(hashes\.(pkl|invalid)|lock(-\d+)?)$')
def collect_info(path_list):
    """
    Recursively collect dirs and files under each directory in path_list.
    :param path_list: start directories for collecting
    :return: files_list, dir_list tuple of included
             directories and files, one entry per input path
    """
    files_list = []
    dir_list = []
    for start in path_list:
        found_files = []
        found_dirs = []
        for _root, dirnames, filenames in os.walk(start):
            found_files.extend(
                name for name in filenames if not EXCLUDE_FILES.match(name))
            found_dirs.extend(dirnames)
        files_list.append(found_files)
        dir_list.append(found_dirs)
    return files_list, dir_list
def find_max_occupancy_node(dir_list):
    """
    Find the node with maximum occupancy.
    :param dir_list: list of directory listings, one per node
    :return: index into dir_list of the node holding the most directories
             (the first such node on ties; 0 for an empty list)
    """
    best_index = 0
    best_length = 0
    for index, dirs in enumerate(dir_list):
        if len(dirs) > best_length:
            best_length = len(dirs)
            best_index = index
    return best_index
class TestReplicatorFunctions(ReplProbeTest):
    """
    Class for testing replicators and replication servers.
    By default configuration - replication servers not used.
    For testing separate replication servers servers need to change
    ring's files using set_info command or new ring's files with
    different port values.
    """
    def put_data(self):
        """Create one container holding one object via the public client API."""
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy':
                                      self.policy.name})
        obj = 'object-%s' % uuid4()
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
    def test_main(self):
        """End-to-end replication check across three failure scenarios."""
        # Create one account, container and object file.
        # Find node with account, container and object replicas.
        # Delete all directories and files from this node (device).
        # Wait 60 seconds and check replication results.
        # Delete directories and files in objects storage without
        # deleting file "hashes.pkl".
        # Check, that files not replicated.
        # Delete file "hashes.pkl".
        # Check, that all files were replicated.
        path_list = []
        data_dir = get_data_dir(self.policy)
        # Figure out where the devices are
        for node_id in range(1, 5):
            conf = readconf(self.configs['object-server'][node_id])
            device_path = conf['app:object-server']['devices']
            for dev in self.object_ring.devs:
                if dev['port'] == int(conf['app:object-server']['bind_port']):
                    device = dev['device']
                    path_list.append(os.path.join(device_path, device))
        # Put data to storage nodes
        self.put_data()
        # Get all data file information
        (files_list, dir_list) = collect_info(path_list)
        num = find_max_occupancy_node(dir_list)
        test_node = path_list[num]
        # Snapshot the node's contents, ignoring transient artifacts
        # (.pending update files and tmp* working directories).
        test_node_files_list = []
        for files in files_list[num]:
            if not files.endswith('.pending'):
                test_node_files_list.append(files)
        test_node_dir_list = []
        for d in dir_list[num]:
            if not d.startswith('tmp'):
                test_node_dir_list.append(d)
        # Run all replicators
        try:
            # Scenario 1: wipe the whole device; replication must restore it.
            # Delete some files
            for directory in os.listdir(test_node):
                shutil.rmtree(os.path.join(test_node, directory))
            self.assertFalse(os.listdir(test_node))
            self.replicators.start()
            # We will keep trying these tests until they pass for up to 60s
            begin = time.time()
            while True:
                (new_files_list, new_dir_list) = collect_info([test_node])
                try:
                    # Check replicate files and dir
                    for files in test_node_files_list:
                        self.assertIn(files, new_files_list[0])
                    for directory in test_node_dir_list:
                        self.assertIn(directory, new_dir_list[0])
                    # We want to make sure that replication is completely
                    # settled; any invalidated hashes should be rehashed so
                    # hashes.pkl is stable
                    for directory in os.listdir(
                            os.path.join(test_node, data_dir)):
                        hashes_invalid_path = os.path.join(
                            test_node, data_dir, directory, 'hashes.invalid')
                        self.assertEqual(os.stat(
                            hashes_invalid_path).st_size, 0)
                    break
                except Exception:
                    # Retry until the 60s deadline; then let the failure raise.
                    if time.time() - begin > 60:
                        raise
                    time.sleep(1)
            self.replicators.stop()
            # Scenario 2: with hashes.pkl intact, the replicator trusts the
            # cached hashes and must NOT restore the deleted suffix dirs.
            # Delete directories and files in objects storage without
            # deleting file "hashes.pkl".
            for directory in os.listdir(os.path.join(test_node, data_dir)):
                for input_dir in os.listdir(os.path.join(
                        test_node, data_dir, directory)):
                    if os.path.isdir(os.path.join(
                            test_node, data_dir, directory, input_dir)):
                        shutil.rmtree(os.path.join(
                            test_node, data_dir, directory, input_dir))
            self.replicators.once()
            # Check, that files not replicated.
            for directory in os.listdir(os.path.join(
                    test_node, data_dir)):
                for input_dir in os.listdir(os.path.join(
                        test_node, data_dir, directory)):
                    self.assertFalse(os.path.isdir(
                        os.path.join(test_node, data_dir,
                                     directory, input_dir)))
            self.replicators.start()
            # Scenario 3: removing hashes.pkl invalidates the cache, so
            # replication must now restore everything.
            # Now, delete file "hashes.pkl".
            # Check, that all files were replicated.
            for directory in os.listdir(os.path.join(test_node, data_dir)):
                os.remove(os.path.join(
                    test_node, data_dir, directory, 'hashes.pkl'))
            # We will keep trying these tests until they pass for up to 60s
            begin = time.time()
            while True:
                try:
                    (new_files_list, new_dir_list) = collect_info([test_node])
                    # Check replicate files and dirs
                    for files in test_node_files_list:
                        self.assertIn(files, new_files_list[0])
                    for directory in test_node_dir_list:
                        self.assertIn(directory, new_dir_list[0])
                    break
                except Exception:
                    if time.time() - begin > 60:
                        raise
                    time.sleep(1)
        finally:
            self.replicators.stop()
class TestReplicatorFunctionsReservedNames(TestReplicatorFunctions):
    """Repeat the replicator tests using reserved (internal-only) names."""
    def put_data(self):
        # Reserved names can only be created through the internal client.
        int_client = self.make_internal_client()
        int_client.create_account(self.account)
        container = get_reserved_name('container', str(uuid4()))
        int_client.create_container(self.account, container,
                                    headers={'X-Storage-Policy':
                                             self.policy.name})
        obj = get_reserved_name('object', str(uuid4()))
        int_client.upload_object(
            BytesIO(b'VERIFY'), self.account, container, obj)
# Allow running this probe-test module directly.
if __name__ == '__main__':
    main()
| {
"content_hash": "e4c491c434111ca748024cf5c1961ff2",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 78,
"avg_line_length": 37.11981566820276,
"alnum_prop": 0.5482309124767225,
"repo_name": "swiftstack/swift",
"id": "3bc7ed928e53d5dceaf02c5061873962c2cf5901",
"size": "8671",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/probe/test_replication_servers_working.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3088"
},
{
"name": "HTML",
"bytes": "625"
},
{
"name": "Python",
"bytes": "12427848"
},
{
"name": "Shell",
"bytes": "8704"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.backend.codegen.targets.jaxb_library import JaxbLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.task.simple_codegen_task import SimpleCodegenTask
class JaxbGen(SimpleCodegenTask, NailgunTask):
  """Generates java source files from jaxb schema (.xsd)."""
  def __init__(self, *args, **kwargs):
    """
    :param context: inherited parameter from Task
    :param workdir: inherited parameter from Task
    """
    super(JaxbGen, self).__init__(*args, **kwargs)
    self.set_distribution(jdk=True)
    self.gen_langs = set()
    lang = 'java'
    if self.context.products.isrequired(lang):
      self.gen_langs.add(lang)
  def _compile_schema(self, args):
    """Run the xjc compiler bundled with the JDK; return its exit code."""
    classpath = self.dist.find_libs(['tools.jar'])
    java_main = 'com.sun.tools.internal.xjc.Driver'
    return self.runjava(classpath=classpath, main=java_main, args=args, workunit_name='xjc')
  def synthetic_target_type(self, target):
    """Generated sources are owned by a plain JavaLibrary target."""
    return JavaLibrary
  def is_gentarget(self, target):
    """Only JaxbLibrary targets are handled by this codegen task."""
    return isinstance(target, JaxbLibrary)
  def execute_codegen(self, target, target_workdir):
    """Generate java sources for each .xsd of `target` into `target_workdir`.

    :raises TaskError: if `target` is not a JaxbLibrary or xjc exits non-zero.
    """
    if not isinstance(target, JaxbLibrary):
      raise TaskError('Invalid target type "{class_type}" (expected JaxbLibrary)'
                      .format(class_type=type(target).__name__))
    for source in target.sources_relative_to_buildroot():
      path_to_xsd = source
      output_package = target.package
      if output_package is None:
        output_package = self._guess_package(source)
      output_package = self._correct_package(output_package)
      # NB(zundel): The -no-header option keeps xjc from writing a timestamp, which
      # would make the output non-deterministic.
      # See https://github.com/pantsbuild/pants/issues/1786
      args = ['-p', output_package, '-d', target_workdir, '-no-header', path_to_xsd]
      result = self._compile_schema(args)
      if result != 0:
        raise TaskError('xjc ... exited non-zero ({code})'.format(code=result))
  @classmethod
  def _guess_package(cls, path):
    """Guess the java package of a schema from its file path.

    Looks for the last occurrence of a conventional top-level package
    component (com/org/net) and turns everything from there up to the file
    name into a dotted package; falls back to the whole directory path.
    """
    supported_prefixes = ('com', 'org', 'net',)
    package = ''
    slash = path.rfind(os.path.sep)
    prefix_with_slash = max(path.rfind(os.path.join('', prefix, ''))
                            for prefix in supported_prefixes)
    if prefix_with_slash < 0:
      package = path[:slash]
    else:
      package = path[prefix_with_slash:slash]
    package = package.replace(os.path.sep, ' ')
    package = package.strip().replace(' ', '.')
    return package
  @classmethod
  def _correct_package(cls, package):
    """Normalize a package string: slashes become dots, edge dots are stripped.

    :raises ValueError: if the package contains consecutive periods.
    """
    package = package.replace('/', '.')
    package = re.sub(r'^\.+', '', package)
    package = re.sub(r'\.+$', '', package)
    if re.search(r'\.{2,}', package) is not None:
      raise ValueError('Package name cannot have consecutive periods! ({})'.format(package))
    return package
| {
"content_hash": "8ece2eb36d6320bc6ef6a343b3a229f6",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 96,
"avg_line_length": 38.406976744186046,
"alnum_prop": 0.6690887072358462,
"repo_name": "cevaris/pants",
"id": "bdedbe35da042656664e5785629946fcdf492180",
"size": "3450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/codegen/tasks/jaxb_gen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "4818"
},
{
"name": "Go",
"bytes": "1596"
},
{
"name": "HTML",
"bytes": "68162"
},
{
"name": "Java",
"bytes": "314216"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "7038"
},
{
"name": "Python",
"bytes": "3282583"
},
{
"name": "Scala",
"bytes": "77693"
},
{
"name": "Shell",
"bytes": "47890"
},
{
"name": "Thrift",
"bytes": "3485"
}
],
"symlink_target": ""
} |
"""
archivebot.py - discussion page archiving bot.
usage:
python pwb.py archivebot [OPTIONS] TEMPLATE_PAGE
Bot examines backlinks (Special:WhatLinksHere) to TEMPLATE_PAGE.
Then goes through all pages (unless a specific page specified using options)
and archives old discussions. This is done by breaking a page into threads,
then scanning each thread for timestamps. Threads older than a specified
threshold are then moved to another page (the archive), which can be named
either based on the thread's name, or the name can contain a counter which
will be incremented when the archive reaches a certain size.
Transcluded template may contain the following parameters:
{{TEMPLATE_PAGE
|archive =
|algo =
|counter =
|maxarchivesize =
|minthreadsleft =
|minthreadstoarchive =
|archiveheader =
|key =
}}
Meanings of parameters are:
archive Name of the page to which archived threads will be put.
Must be a subpage of the current page. Variables are
supported.
algo Specifies the maximum age of a thread. Must be
in the form old(<delay>) where <delay> specifies
the age in seconds (s), hours (h), days (d),
weeks (w), or years (y) like 24h or 5d. Default is
old(24h).
counter The current value of a counter which could be assigned as
variable. Will be updated by bot. Initial value is 1.
maxarchivesize The maximum archive size before incrementing the counter.
Value can be given with appending letter like K or M
which indicates KByte or MByte. Default value is 200K.
minthreadsleft Minimum number of threads that should be left on a page.
Default value is 5.
minthreadstoarchive The minimum number of threads to archive at once. Default
value is 2.
archiveheader Content that will be put on new archive pages as the
header. This parameter supports the use of variables.
Default value is {{talkarchive}}
key A secret key that (if valid) allows archives not to be
subpages of the page being archived.
Variables below can be used in the value for "archive" in the template above:
%(counter)d the current value of the counter
%(year)d year of the thread being archived
%(isoyear)d ISO year of the thread being archived
%(isoweek)d ISO week number of the thread being archived
%(semester)d semester term of the year of the thread being archived
%(quarter)d quarter of the year of the thread being archived
%(month)d month (as a number 1-12) of the thread being archived
%(monthname)s localized name of the month above
%(monthnameshort)s first three letters of the name above
%(week)d week number of the thread being archived
The ISO calendar starts with the Monday of the week which has at least four
days in the new Gregorian calendar. If January 1st is between Monday and
Thursday (including), the first week of that year started the Monday of that
week, which is in the year before if January 1st is not a Monday. If it's
between Friday or Sunday (including) the following week is then the first week
of the year. So up to three days are still counted as the year before.
See also:
- http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
- https://docs.python.org/3.4/library/datetime.html#datetime.date.isocalendar
Options (may be omitted):
-help show this help message and exit
-calc:PAGE calculate key for PAGE and exit
-file:FILE load list of pages from FILE
-force override security options
-locale:LOCALE switch to locale LOCALE
-namespace:NS only archive pages from a given namespace
-page:PAGE archive a single PAGE, default ns is a user talk page
-salt:SALT specify salt
"""
#
# (C) Pywikibot team, 2006-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import datetime
import locale
import math
import os
import re
import time
from collections import OrderedDict
from hashlib import md5
from math import ceil
import pywikibot
from pywikibot.date import apply_month_delta
from pywikibot import i18n
from pywikibot.textlib import (extract_sections, findmarker, TimeStripper,
to_local_digits)
from pywikibot.tools import issue_deprecation_warning, FrozenDict
# Zero offset, reused by the UTC tzinfo implementation below.
ZERO = datetime.timedelta(0)
# Maps shorthand duration qualifiers to MediaWiki message keys / unit names.
MW_KEYS = FrozenDict({
    's': 'seconds',
    'h': 'hours',
    'd': 'days',
    'w': 'weeks',
    'y': 'years',
    # 'months' and 'minutes' were removed because confusion outweighs merit
}, 'MW_KEYS is a dict constant')
class ArchiveBotSiteConfigError(pywikibot.Error):
    """Base error for problems in archivebot's on-site configuration."""
class MalformedConfigError(ArchiveBotSiteConfigError):
    """There is an error in the configuration template."""
class MissingConfigError(ArchiveBotSiteConfigError):
    """
    The config is missing in the page header.

    It is in one of the threads or transcluded from another page instead.
    """
class AlgorithmError(MalformedConfigError):
    """Invalid specification of the archiving algorithm ("algo" parameter)."""
class ArchiveSecurityError(ArchiveBotSiteConfigError):
    """
    Page title is not a valid archive of page being archived.

    The page title is neither a subpage of the page being archived,
    nor does it match the key specified in the archive configuration template.
    """
def str2localized_duration(site, string):
    """
    Localise a shorthand duration.

    Translates a duration written in the shorthand notation (ex. "24h", "7d")
    into an expression in the local wiki language ("24 hours", "7 days").
    """
    key, duration = checkstr(string)
    template = site.mediawiki_message(MW_KEYS[key])
    if template:
        # replace plural variants
        exp = i18n.translate(site.code, template, {'$1': int(duration)})
        return exp.replace('$1', to_local_digits(duration, site.code))
    else:
        # No unit message on this wiki: return the shorthand in local digits.
        return to_local_digits(string, site.code)
def str2time(string, timestamp=None):
    """
    Return a timedelta for a shorthand duration.

    @param string: a string defining a time period:
        300s - 300 seconds
        36h - 36 hours
        7d - 7 days
        2w - 2 weeks (14 days)
        1y - 1 year
    @type string: str
    @param timestamp: a timestamp to calculate a more accurate duration offset
        used by years
    @type timestamp: datetime.datetime
    @return: the corresponding timedelta object
    @rtype: datetime.timedelta
    """
    key, duration = checkstr(string)
    if duration.isdigit():
        duration = int(duration)
    else:
        # A non-numeric amount is invalid; clearing the key routes the call
        # into the "unrecognized parameter" error below.
        key = ''
    if key in ['d', 's', 'h', 'w']: # days, seconds, hours, weeks
        return datetime.timedelta(**{MW_KEYS[key]: duration})
    if key == 'y': # years
        # Without a reference timestamp a year is approximated as 365.25
        # days; with one, the offset is computed via months (12 per year).
        days = math.ceil(duration * 365.25)
        duration *= 12
    else:
        raise MalformedConfigError(
            'Unrecognized parameter in template: {0}'.format(string))
    if timestamp:
        return apply_month_delta(
            timestamp.date(), month_delta=duration) - timestamp.date()
    else:
        return datetime.timedelta(days=days)
def checkstr(string):
    """
    Return the key and duration extracted from the string.

    @param string: a string defining a time period:
        300s - 300 seconds
        36h - 36 hours
        7d - 7 days
        2w - 2 weeks (14 days)
        1y - 1 year
    @type string: str
    @return: key and duration extracted form the string
    @rtype: (str, str)
    """
    if string.isdigit():
        # A bare number is deprecated shorthand; interpret it as seconds.
        issue_deprecation_warning('Time period without qualifier',
                                  string + 's', 1, UserWarning,
                                  since='20161009')
        return 's', string
    return string[-1], string[:-1]
def str2size(string):
    """
    Return a size for a shorthand size.

    Accepts a string defining a size:
    1337 - 1337 bytes
    150K - 150 kilobytes
    2M - 2 megabytes
    Returns a tuple (size, unit), where size is an integer and unit is
    'B' (bytes) or 'T' (threads).
    """
    match = re.search(r'(\d+) *([BkKMT]?)', string)
    if match is None:
        raise MalformedConfigError("Couldn't parse size: {}".format(string))
    val = int(match.group(1))
    unit = match.group(2)
    # Normalize megabytes down to kilobytes, then kilobytes down to bytes.
    if unit == 'M':
        val *= 1024
        unit = 'K'
    if unit in ('K', 'k'):
        val *= 1024
    # Everything except thread counts is reported in bytes.
    if unit != 'T':
        unit = 'B'
    return val, unit
def template_title_regex(tpl_page):
    """
    Return a regex that matches to variations of the template title.

    It supports the transcluding variant as well as localized namespaces and
    case-insensitivity depending on the namespace.

    @param tpl_page: The template page
    @type tpl_page: pywikibot.page.Page
    """
    ns = tpl_page.site.namespaces[tpl_page.namespace()]
    # In the Template namespace (id 10) the prefix may be omitted when
    # transcluding, so the namespace part of the pattern is made optional.
    marker = '?' if ns.id == 10 else ''
    title = tpl_page.title(with_ns=False)
    if ns.case != 'case-sensitive':
        # The first letter may appear in either case.
        title = '[{}{}]{}'.format(re.escape(title[0].upper()),
                                  re.escape(title[0].lower()),
                                  re.escape(title[1:]))
    else:
        title = re.escape(title)
    return re.compile(r'(?:(?:%s):)%s%s' % ('|'.join(ns), marker, title))
def calc_md5_hexdigest(txt, salt):
    """Return md5 hexdigest computed from text and salt.

    The digest is taken over ``salt + '\\n' + txt + '\\n'`` with both
    parts UTF-8 encoded.

    @param txt: text to hash (usually a page title)
    @param salt: shared secret mixed into the hash
    @return: hex digest string
    """
    digest = md5()
    digest.update(salt.encode('utf-8'))
    digest.update(b'\n')
    # Was 'utf8'; same codec, spelled consistently with the line above.
    digest.update(txt.encode('utf-8'))
    digest.update(b'\n')
    return digest.hexdigest()
class TZoneUTC(datetime.tzinfo):
    """Class building a UTC tzinfo object."""
    def utcoffset(self, dt):
        """Return the fixed offset from UTC (always zero)."""
        return ZERO
    def dst(self, dt):
        """Return the DST adjustment (always zero; UTC has no DST)."""
        return ZERO
    def tzname(self, dt):
        """Return the name of this time zone."""
        return 'UTC'
    def __repr__(self):
        """Return a string representation."""
        return '{}()'.format(type(self).__name__)
class DiscussionThread(object):
    """
    An object representing a discussion thread on a page.

    It represents something that is of the form:

    == Title of thread ==

    Thread content here. ~~~~
    :Reply, etc. ~~~~
    """
    def __init__(self, title, now, timestripper):
        """Initializer."""
        self.title = title
        self.now = now
        self.ts = timestripper
        self.code = self.ts.site.code
        self.content = ''
        self.timestamp = None
    def __repr__(self):
        """Return a string representation."""
        return '{}("{}",{} bytes)'.format(
            type(self).__name__, self.title,
            len(self.content.encode('utf-8')))
    def feed_line(self, line):
        """Add a line to the content and find the newest timestamp."""
        # Ignore blank lines that would otherwise lead the thread body.
        if not (self.content or line):
            return
        self.content = '{}{}\n'.format(self.content, line)
        found = self.ts.timestripper(line)
        if found:
            # Keep only the newest signature timestamp seen so far.
            self.timestamp = (found if not self.timestamp
                              else max(self.timestamp, found))
    def size(self):
        """Return size of discussion thread."""
        # +12 accounts for the heading markup added by to_text().
        total = len(self.title.encode('utf-8'))
        total += len(self.content.encode('utf-8'))
        return total + 12
    def to_text(self):
        """Return wikitext discussion thread."""
        return '== ' + self.title + ' ==\n\n' + self.content
    def should_be_archived(self, archiver):
        """
        Check whether thread has to be archived.

        @return: the archivation reason as a tuple of localization args
        @rtype: tuple
        """
        algo = archiver.get_attr('algo')
        match = re.search(r'^old\((.*)\)$', algo)
        if not match:
            # TODO: handle marked with template
            return None
        # Archived by timestamp
        if not self.timestamp:
            return None
        # TODO: handle unsigned
        maxage = str2time(match.group(1), self.timestamp)
        if self.now - self.timestamp > maxage:
            duration = str2localized_duration(archiver.site, match.group(1))
            return ('duration', duration)
        return None
class DiscussionPage(pywikibot.Page):
    """
    A class that represents a single page of discussion threads.
    Feed threads to it and run an update() afterwards.
    """
    def __init__(self, source, archiver, params=None):
        """Initializer.
        @param source: page specifier accepted by pywikibot.Page
        @param archiver: PageArchiver owning this page, or None (tests only,
            see the doctest-style comment below)
        @param params: optional mapping used to expand %-placeholders in
            the archive header (counter, year, month, ...)
        """
        super(DiscussionPage, self).__init__(source)
        self.threads = []
        self.full = False
        self.archiver = archiver
        # for testing purposes we allow archiver to be None and we are able
        # to create the a DiscussionPage in this way:
        # >>> import pywikibot as py
        # >>> from scripts.archivebot import DiscussionPage
        # >>> d = DiscussionPage(py.Page(py.Site(), <talk page name>), None)
        if archiver is None:
            self.timestripper = TimeStripper(self.site)
        else:
            self.timestripper = self.archiver.timestripper
        self.params = params
        # Timezone-aware "now"; compared against thread signature timestamps.
        self.now = datetime.datetime.utcnow().replace(tzinfo=TZoneUTC())
        try:
            self.load_page()
        except pywikibot.NoPage:
            # Page does not exist yet: start with the configured archive
            # header (or the localized default) instead of loaded content.
            self.header = archiver.get_attr('archiveheader',
                                            i18n.twtranslate(
                                                self.site.code,
                                                'archivebot-archiveheader'))
            if self.params:
                self.header = self.header % self.params
    def load_page(self):
        """Load the page to be archived and break it up into threads."""
        self.header = ''
        self.threads = []
        self.archives = {}
        self.archived_threads = 0
        # Exclude unsupported headings (h1, h3, etc):
        # adding the marker will make them ignored by extract_sections()
        text = self.get()
        marker = findmarker(text)
        text = re.sub(r'^((=|={3,})[^=])', marker + r'\1', text, flags=re.M)
        # Find threads, avoid archiving categories or interwiki
        header, threads, footer = extract_sections(text, self.site)
        header = header.replace(marker, '')
        if header and footer:
            self.header = '\n\n'.join((header.rstrip(), footer, ''))
        else:
            self.header = header + footer
        for thread_heading, thread_content in threads:
            cur_thread = DiscussionThread(thread_heading.strip('= '), self.now,
                                          self.timestripper)
            # Strip the marker inserted above before feeding the body lines.
            lines = thread_content.replace(marker, '').splitlines()
            lines = lines[1:]  # remove heading line
            for line in lines:
                cur_thread.feed_line(line)
            self.threads.append(cur_thread)
        # This extra info is not desirable when run under the unittest
        # framework, which may be run either directly or via setup.py
        if pywikibot.calledModuleName() not in ['archivebot_tests', 'setup']:
            pywikibot.output('{} thread(s) found on {}'
                             .format(len(self.threads), self))
    def feed_thread(self, thread, max_archive_size=(250 * 1024, 'B')):
        """Check whether archive size exceeded.
        @param thread: DiscussionThread to append to this archive page
        @param max_archive_size: (limit, unit) tuple; unit 'B' limits bytes,
            unit 'T' limits the number of threads
        @return: True once this archive page is full
        """
        self.threads.append(thread)
        self.archived_threads += 1
        if max_archive_size[1] == 'B':
            if self.size() >= max_archive_size[0]:
                self.full = True
        elif max_archive_size[1] == 'T':
            if len(self.threads) >= max_archive_size[0]:
                self.full = True
        return self.full
    def size(self):
        """Return size of talk page threads."""
        return len(self.header.encode('utf-8')) + sum(t.size()
                                                      for t in self.threads)
    def update(self, summary, sort_threads=False):
        """Recombine threads and save page.
        @param summary: edit summary; a "full" note is appended when the
            page has reached its size limit
        @param sort_threads: if True, order threads by their newest
            signature timestamp before saving
        """
        if sort_threads:
            pywikibot.output('Sorting threads...')
            self.threads.sort(key=lambda t: t.timestamp)
        newtext = re.sub('\n*$', '\n\n', self.header)  # Fix trailing newlines
        for t in self.threads:
            newtext += t.to_text()
        if self.full:
            summary += ' ' + i18n.twtranslate(self.site.code,
                                              'archivebot-archive-full')
        self.text = newtext
        self.save(summary)
class PageArchiver(object):
    """A class that encapsulates all archiving methods."""
    # Default algorithm name; the real value comes from the on-page
    # template's 'algo' parameter (see load_config).
    algo = 'none'
    def __init__(self, page, template, salt, force=False):
        """Initializer.
        @param page: a page object to be archived
        @type page: pywikibot.Page
        @param template: a template with configuration settings
        @type template: pywikibot.Page
        @param salt: salt value
        @type salt: str
        @param force: override security value
        @type force: bool
        """
        # Template parameter name -> [value, save-back flag]; the flag
        # marks values that attr2text() writes back into the template.
        self.attributes = OrderedDict([
            ('archive', ['', False]),
            ('algo', ['old(24h)', False]),
            ('counter', ['1', False]),
            ('maxarchivesize', ['200K', False]),
        ])
        self.salt = salt
        self.force = force
        self.site = page.site
        self.tpl = template
        self.timestripper = TimeStripper(site=self.site)
        self.page = DiscussionPage(page, self)
        self.load_config()
        self.comment_params = {
            'from': self.page.title(),
        }
        self.archives = {}
        self.archived_threads = 0
        # Month number (1-12) -> localized long/short month names, used to
        # expand %(monthname)s / %(monthnameshort)s in archive titles.
        self.month_num2orig_names = {}
        for n, (_long, _short) in enumerate(self.site.months_names):
            self.month_num2orig_names[n + 1] = {'long': _long, 'short': _short}
    def get_attr(self, attr, default=''):
        """Get an archiver attribute.
        @param default: value returned when the attribute is unknown
        """
        return self.attributes.get(attr, [default])[0]
    def set_attr(self, attr, value, out=True):
        """Set an archiver attribute.
        @param out: if True, the value is written back to the on-page
            template by attr2text()
        """
        if attr == 'archive':
            value = value.replace('_', ' ')
        self.attributes[attr] = [value, out]
    def saveables(self):
        """Return a list of saveable attributes."""
        return [a for a in self.attributes if self.attributes[a][1]
                and a != 'maxage']
    def attr2text(self):
        """Return a template with archiver saveable attributes."""
        return '{{%s\n%s\n}}' \
            % (self.tpl.title(with_ns=(self.tpl.namespace() != 10)),
               '\n'.join('|{} = {}'.format(a, self.get_attr(a))
                         for a in self.saveables()))
    def key_ok(self):
        """Return whether key is valid."""
        # The key authorizes archive titles that do not start with the
        # page title; it is an md5 of page title + salt (see -calc).
        hexdigest = calc_md5_hexdigest(self.page.title(), self.salt)
        return self.get_attr('key') == hexdigest
    def load_config(self):
        """Load and validate archiver template."""
        pywikibot.output('Looking for: {{%s}} in %s' % (self.tpl.title(),
                                                        self.page))
        for tpl, params in self.page.raw_extracted_templates:
            try:  # Check tpl name before comparing; it might be invalid.
                tpl_page = pywikibot.Page(self.site, tpl, ns=10)
                tpl_page.title()
            except pywikibot.Error:
                continue
            if tpl_page == self.tpl:
                for item, value in params.items():
                    self.set_attr(item.strip(), value.strip())
                break
        else:
            raise MissingConfigError('Missing or malformed template')
        if not self.get_attr('algo', ''):
            raise MissingConfigError('Missing argument "algo" in template')
        if not self.get_attr('archive', ''):
            raise MissingConfigError('Missing argument "archive" in template')
    def feed_archive(self, archive, thread, max_archive_size, params=None):
        """
        Feed the thread to one of the archives.
        If it doesn't exist yet, create it.
        Also check for security violations.
        @return: True when the target archive page became full
        """
        title = archive.title()
        page_title = self.page.title()
        # Refuse off-prefix archive titles unless forced or authorized
        # with a valid key (see key_ok).
        if not (self.force or title.startswith(page_title + '/')
                or self.key_ok()):
            raise ArchiveSecurityError(
                'Archive page {} does not start with page title ({})!'
                .format(archive, page_title))
        if title not in self.archives:
            self.archives[title] = DiscussionPage(archive, self, params)
        return self.archives[title].feed_thread(thread, max_archive_size)
    def analyze_page(self):
        """Analyze DiscussionPage.
        Move old-enough threads into archive pages and keep the rest on
        the talk page.
        @return: set of (reason, argument) tuples explaining the moves
        """
        max_arch_size = str2size(self.get_attr('maxarchivesize'))
        arch_counter = int(self.get_attr('counter', '1'))
        oldthreads = self.page.threads
        self.page.threads = []
        whys = set()
        pywikibot.output('Processing {} threads'.format(len(oldthreads)))
        for t in oldthreads:
            if len(oldthreads) - self.archived_threads \
                    <= int(self.get_attr('minthreadsleft', 5)):
                self.page.threads.append(t)
                continue  # Because there's too little threads left.
            # TODO: Make an option so that unstamped (unsigned) posts get
            # archived.
            why = t.should_be_archived(self)
            if why:
                archive = self.get_attr('archive')
                lang = self.site.lang
                # Placeholders available in the 'archive' title pattern.
                params = {
                    'counter': to_local_digits(arch_counter, lang),
                    'year': to_local_digits(t.timestamp.year, lang),
                    'isoyear': to_local_digits(t.timestamp.isocalendar()[0],
                                               lang),
                    'isoweek': to_local_digits(t.timestamp.isocalendar()[1],
                                               lang),
                    'semester': to_local_digits(
                        int(ceil(float(t.timestamp.month) / 6)), lang),
                    'quarter': to_local_digits(
                        int(ceil(float(t.timestamp.month) / 3)), lang),
                    'month': to_local_digits(t.timestamp.month, lang),
                    'monthname': self.month_num2orig_names[
                        t.timestamp.month]['long'],
                    'monthnameshort': self.month_num2orig_names[
                        t.timestamp.month]['short'],
                    'week': to_local_digits(
                        int(time.strftime('%W',
                                          t.timestamp.timetuple())), lang),
                }
                archive = pywikibot.Page(self.site, archive % params)
                # When the current archive is full, bump the counter so
                # the next thread goes to a fresh page.
                if self.feed_archive(archive, t, max_arch_size, params):
                    arch_counter += 1
                    self.set_attr('counter', str(arch_counter))
                whys.add(why)
                self.archived_threads += 1
            else:
                self.page.threads.append(t)
        return whys
    def run(self):
        """Process a single DiscussionPage object."""
        if not self.page.botMayEdit():
            return
        whys = self.analyze_page()
        mintoarchive = int(self.get_attr('minthreadstoarchive', 2))
        if self.archived_threads < mintoarchive:
            # We might not want to archive a measly few threads
            # (lowers edit frequency)
            pywikibot.output('Only {} (< {}) threads are old enough. Skipping'
                             .format(self.archived_threads, mintoarchive))
            return
        if whys:
            # Search for the marker template
            rx = re.compile(r'\{\{%s\s*?\n.*?\n\}\}'
                            % (template_title_regex(self.tpl).pattern),
                            re.DOTALL)
            if not rx.search(self.page.header):
                raise MalformedConfigError(
                    "Couldn't find the template in the header"
                )
            pywikibot.output('Archiving {0} thread(s).'
                             .format(self.archived_threads))
            # Save the archives first (so that bugs don't cause a loss of data)
            for a in sorted(self.archives.keys()):
                self.comment_params['count'] = self.archives[
                    a].archived_threads
                comment = i18n.twtranslate(self.site.code,
                                           'archivebot-archive-summary',
                                           self.comment_params)
                self.archives[a].update(comment)
            # Save the page itself
            self.page.header = rx.sub(self.attr2text(), self.page.header)
            self.comment_params['count'] = self.archived_threads
            comma = self.site.mediawiki_message('comma-separator')
            self.comment_params['archives'] \
                = comma.join(a.title(as_link=True)
                             for a in self.archives.values())
            # Find out the reasons and return them localized
            translated_whys = set()
            for why, arg in whys:
                # Archived by timestamp
                if why == 'duration':
                    translated_whys.add(
                        i18n.twtranslate(self.site.code,
                                         'archivebot-older-than',
                                         {'duration': arg,
                                          'count': self.archived_threads}))
                # TODO: handle unsigned or archived by template
            self.comment_params['why'] = comma.join(translated_whys)
            comment = i18n.twtranslate(self.site.code,
                                       'archivebot-page-summary',
                                       self.comment_params)
            self.page.update(comment)
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: str
    """
    filename = None
    pagename = None
    namespace = None
    salt = ''
    force = False
    calc = None
    templates = []
    local_args = pywikibot.handle_args(args)
    for arg in local_args:
        option, _, value = arg.partition(':')
        if not option.startswith('-'):
            templates.append(arg)
            continue
        option = option[1:]
        if option in ('file', 'filename'):
            filename = value
        elif option == 'locale':
            # Required for english month names.  Pass the string itself:
            # encoding it to bytes breaks locale.setlocale on Python 3.
            locale.setlocale(locale.LC_TIME, value)
        elif option == 'timezone':
            # Fix: 'value' already is the plain option string; the old
            # code read 'value.timezone', which always raised
            # AttributeError because str has no such attribute.
            os.environ['TZ'] = value
            # Or use the preset value
            if hasattr(time, 'tzset'):
                time.tzset()
        elif option == 'calc':
            calc = value
        elif option == 'salt':
            salt = value
        elif option == 'force':
            force = True
        elif option == 'page':
            pagename = value
        elif option == 'namespace':
            namespace = value
    site = pywikibot.Site()
    if calc:
        if not salt:
            pywikibot.bot.suggest_help(missing_parameters=['-salt'])
            return
        page = pywikibot.Page(site, calc)
        if page.exists():
            calc = page.title()
        else:
            pywikibot.output(
                'NOTE: the specified page "{0}" does not (yet) exist.'
                .format(calc))
        pywikibot.output('key = {}'.format(calc_md5_hexdigest(calc, salt)))
        return
    if not templates:
        pywikibot.bot.suggest_help(
            additional_text='No template was specified.')
        return
    for template_name in templates:
        pagelist = []
        tmpl = pywikibot.Page(site, template_name, ns=10)
        if not filename and not pagename:
            if namespace is not None:
                ns = [str(namespace)]
            else:
                ns = []
            pywikibot.output('Fetching template transclusions...')
            for pg in tmpl.getReferences(only_template_inclusion=True,
                                         follow_redirects=False,
                                         namespaces=ns):
                pagelist.append(pg)
        if filename:
            # Close the file deterministically (was a bare open() that
            # leaked the file handle).
            with open(filename, 'r') as page_list_file:
                for pg in page_list_file.readlines():
                    pagelist.append(pywikibot.Page(site, pg, ns=10))
        if pagename:
            pagelist.append(pywikibot.Page(site, pagename, ns=3))
        pagelist = sorted(pagelist)
        for pg in pagelist:
            pywikibot.output('Processing {}'.format(pg))
            # Catching exceptions, so that errors in one page do not bail out
            # the entire process
            try:
                archiver = PageArchiver(pg, tmpl, salt, force)
                archiver.run()
            except ArchiveBotSiteConfigError as e:
                # no stack trace for errors originated by pages on-site
                pywikibot.error('Missing or malformed template in page {}: {}'
                                .format(pg, e))
            except Exception:
                pywikibot.error('Error occurred while processing page {}'
                                .format(pg))
                pywikibot.exception(tb=True)
# Script entry point: run the bot only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
| {
"content_hash": "b6d94e22ec1e2b077c38bab3c752f5ae",
"timestamp": "",
"source": "github",
"line_count": 815,
"max_line_length": 79,
"avg_line_length": 36.4159509202454,
"alnum_prop": 0.5647427473971495,
"repo_name": "PersianWikipedia/pywikibot-core",
"id": "eddf7cf35d947f0bce83ec4714ada0e6bb58b68f",
"size": "29721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/archivebot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "4021871"
}
],
"symlink_target": ""
} |
"""This example creates an exchange rate.
To determine which exchange rates exist, run get_all_exchange_rates.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Create a fixed AUD exchange rate via the DFP ExchangeRateService.

  Args:
    client: an initialized dfp.DfpClient used to obtain the service.
  """
  # Initialize appropriate service.
  exchange_rate_service = client.GetService('ExchangeRateService',
                                            version='v201502')

  # Create a new fixed exchange rate with currency code 'AUD', with direction
  # FROM_NETWORK with a value of 1.5.
  exchange_rate = {
      'currencyCode': 'AUD',
      'direction': 'FROM_NETWORK',
      # Exchange rates are scaled by 10**10 (1.5 -> 15000000000).  A plain
      # integer literal replaces the Python-2-only long() call; Python 2
      # promotes large int literals to long automatically.
      'exchangeRate': 15000000000,
      'refreshRate': 'FIXED'
  }

  created_exchange_rate = exchange_rate_service.createExchangeRates(
      [exchange_rate])[0]

  print ('Exchange rate with id \'%s,\' currency code \'%s,\' '
         'direction \'%s,\' and exchange rate \'%.2f\' '
         'was created.' % (created_exchange_rate['id'],
                           created_exchange_rate['currencyCode'],
                           created_exchange_rate['direction'],
                           (float(created_exchange_rate['exchangeRate']) /
                            10000000000)))
if __name__ == '__main__':
  # Initialize client object.
  # LoadFromStorage reads cached credentials from the googleads.yaml file.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| {
"content_hash": "756c3456937f455d6b0f47e6bc22eeca",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 31.19047619047619,
"alnum_prop": 0.6038167938931298,
"repo_name": "wubr2000/googleads-python-lib",
"id": "f8928d03d261643c7fc9afd80334ae511ba4621c",
"size": "1928",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201502/exchange_rate_service/create_exchange_rates.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
} |
"""
autodoc
~~~~~~~
Autodoc Python implementation.
`Ruby autodoc <https://github.com/r7kamura/autodoc>`_
`Perl autodoc <https://metacpan.org/pod/Test::JsonAPI::Autodoc>`_
:copyright: (c) 2014-2018 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import json
from string import Template
from functools import wraps
from decorator import decorator
from autodoc._compat import to_unicode, parse_qsl
__version__ = '0.3'
class Response(object):
    """Abstract base class for framework-specific response parsers."""
    def parse(self, response):
        """Parse a response object into a document dict; must be overridden."""
        raise NotImplementedError()
class WebTestResponse(Response):
    """Parse a WebTest TestResponse into the document variables dict."""
    def parse(self, response):
        """Parse WebTest response.

        :param response: WebTest response object
        :return: dict with the status/request/response fields consumed by
            the document template
        """
        content_type = ''
        # items() works on both Python 2 and 3; the previous iteritems()
        # call was Python 2 only and crashed on Python 3 (the sibling
        # RequestsResponse parser already avoids py2-only APIs).
        for k, v in response.headers.items():
            if k == 'Content-Type':
                content_type = v
        status = response.status_code
        params = ''
        response_body = ''
        if to_unicode(response.body) != '':
            # Pretty-print the JSON body for the generated document.
            response_body = json.dumps(response.json_body, indent=2)
        request = response.request
        if to_unicode(request.body) != '':
            params = json.dumps(request.json_body, indent=2)
        ret = {
            'status_code': status,
            'response_content_type': content_type,
            'response_body': response_body,
            'target_url': '{0}://{1}'.format(request.scheme, request.host),
            'request': '{0} {1}'.format(request.method, request.path_info),
            'params': params
        }
        return ret
class RequestsResponse(Response):
    """Parse a ``requests`` response into the document variables dict."""
    def parse(self, response):
        """Parse requests response.

        :param response: requests response object
        """
        request = response.request
        response_body = ''
        if to_unicode(response.content) != '':
            # Round-trip through json so that {'foo': 'bar'} is rendered
            # as an indented multi-line block in the generated document:
            # {
            #   'foo': 'bar'
            # }
            decoded = json.loads(to_unicode(response.content))
            response_body = json.dumps(decoded, indent=2)
        params = ''
        if request.body != '':
            # parse_qsl yields (key, value) pairs, e.g.
            # user_id=foo&email=foo%40example.com ->
            # [('user_id', 'foo'), ('email', 'foo@example.com')]
            request_params = dict(parse_qsl(request.body))
            params = json.dumps(request_params, indent=2)
        return {
            'status_code': response.status_code,
            'response_content_type': response.headers['Content-Type'],
            'response_body': response_body,
            'target_url': request.url,
            'request': '{0} {1}'.format(request.method, request.path_url),
            'params': params
        }
class Autodoc(object):
    """Collect parsed request/response pairs and render them to a document."""
    def __init__(self):
        self.clear()
        # Default template ships with the package: autodoc/templates/template.rst
        root_path = os.path.dirname(os.path.abspath(__file__))
        self.template_path = os.path.join(root_path, 'templates',
                                          'template.rst')
        # Character repeated under each title to form the Sphinx underline.
        self.separators = '='
    def clear(self):
        """ Clear all stored response. """
        self.vars = []
    def parse(self, describe, response):
        """Parse WebTest response.
        :param describe: Description of document
        :param response: WebTest response
        """
        # Dispatch on the response's module so both WebTest and requests
        # objects are supported; anything else is silently ignored.
        if response.__module__ == 'webtest.response':
            klass = WebTestResponse()
        elif response.__module__ == 'requests.models':
            klass = RequestsResponse()
        else:
            return
        document = klass.parse(response)
        document['describe'] = describe
        #: This variable is the Sphinx title underline (same length as title).
        document['describe_separators'] = self.separators * len(describe)
        self.vars.append(document)
    def create_document(self, output):
        """Read template file and render file.
        :param output: Output file path.
        """
        if not os.path.exists(self.template_path):
            raise IOError('Template file not found.')
        documents = []
        with open(self.template_path, 'rb') as f:
            data = f.read()
            template = Template(to_unicode(data))
        # These fields are multi-line JSON; reST literal blocks need the
        # continuation lines indented two spaces.
        indent_targets = ['params', 'response_body']
        for v in self.vars:
            if self.template_path.endswith('.rst'):
                for k in indent_targets:
                    lines = v[k].split('\n')
                    ret = []
                    for i, l in enumerate(lines):
                        if i > 0:
                            ret.append('  {0}'.format(l).rstrip())
                        else:
                            ret.append(l)
                    v[k] = '\n'.join(ret)
            document = template.substitute(v)
            documents.append(document)
        with open(output, 'w') as f:
            f.write('\n'.join(documents))
    def _is_runnable(self):
        # Generation only happens when the PYAUTODOC=1 env switch is set.
        if 'PYAUTODOC' in os.environ and os.environ['PYAUTODOC'] == '1':
            return True
        return False
    def describe(self, *args, **kwargs):
        """Parse WebTest response.
        Decorator: runs the wrapped test, records its returned response
        under the given description.
        @autodoc.describe('/ GET')
        :param *args:
        :param **kwargs:
        """
        def _autodoc(func, *_args, **_kwargs):
            if len(_args) > 0:
                #: Instance or class method.
                response = func(_args[0])
            else:
                #: Function.
                if len(_kwargs) > 0:
                    response = func(**_kwargs)
                else:
                    response = func()
            self.parse(args[0], response)
            return func
        return decorator(_autodoc)
    def generate(self, *args, **kwargs):
        """Generate document.
        Decorator: runs the wrapped callable, then (when _is_runnable)
        renders everything collected so far to args[0] and clears state.
        @autodoc.generate('output_file.rst', template='template.rst')
        :param *args:
        :param **kwargs:
        """
        #: TODO Use decorator instead.
        def _generate(func):
            @wraps(func)
            def __generate(*_args):
                if len(_args) > 0:
                    arg = _args[0]
                else:
                    arg = _args
                if self._is_runnable() is False:
                    return func
                func(arg)
                if 'template' in kwargs:
                    template_path = kwargs['template']
                    if not os.path.isabs(template_path):
                        template_path = os.path.abspath(template_path)
                    self.template_path = template_path
                path = args[0]
                if not os.path.isabs(args[0]):
                    path = os.path.abspath(args[0])
                self.create_document(path)
                #: Initialize.
                self.clear()
                return func
            return __generate
        return _generate
autodoc = Autodoc()
| {
"content_hash": "626bb259fa79e7fb7b5c60447191b437",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 75,
"avg_line_length": 29.068825910931174,
"alnum_prop": 0.49735376044568247,
"repo_name": "heavenshell/py-autodoc",
"id": "de8aac5d7ba55ec8303628bc52e4906ad90f02e0",
"size": "7204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autodoc/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "25291"
}
],
"symlink_target": ""
} |
from runtests.mpi import MPITest
import pytest
import time
def test_benchmark1(benchmark):
    # 'benchmark' is the runtests.mpi fixture; it exposes the MPI
    # communicator and is used as a context manager around the timed body.
    comm = benchmark.comm
    with benchmark("test 1"):
        # Sleep proportionally to the rank so each rank reports a
        # different, predictable duration.
        time.sleep((1+comm.rank)*0.25)
@pytest.mark.parametrize('x', [1, 2])
def test_benchmark2(benchmark, x):
    # Parametrized variant of test_benchmark1: 'x' is unused in the body
    # and only exists to produce two separate benchmark runs.
    comm = benchmark.comm
    with benchmark("test 2"):
        time.sleep((1+comm.rank)*0.25)
| {
"content_hash": "03a06c5a87d75ab3f62d224b224cb1e3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 38,
"avg_line_length": 22.375,
"alnum_prop": 0.6675977653631285,
"repo_name": "rainwoodman/mpi4py_test",
"id": "4a221b46cffa440adc8d81dcc9dea389237194d0",
"size": "358",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "runtests/mpi/tests/test_benchmark.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "24522"
},
{
"name": "Shell",
"bytes": "206"
}
],
"symlink_target": ""
} |
"""
Level loading and tileset code based on qq demo
https://bitbucket.org/thesheep/qq/overview
by Radomir Dopieralski <qq@sheep.art.pl>
@copyright: 2014 Andrew Conway <nalumc@gmail.com>
@license: BSD, see COPYING for details
"""
import pygame
import pygame.locals as pg
from random import randint
# Dimensions of the map tiles
MAP_TILE_WIDTH, MAP_TILE_HEIGHT = 24, 32
# Motion offsets for particular directions
# N E S W
DX = [0, 1, 0, -1]
DY = [-1, 0, 1, 0]
class TileCache(object):
    """Load the tilesets lazily into global cache."""
    def __init__(self, width=32, height=None):
        """Create a cache for tiles of width x height pixels.

        If height is omitted (or falsy), tiles are square.
        """
        self.width = width
        self.height = height or width
        self.cache = {}
    def __getitem__(self, filename):
        """Return a table of tiles, load it from disk if needed."""
        key = (filename, self.width, self.height)
        try:
            return self.cache[key]
        except KeyError:
            tile_table = self._load_tile_table(filename, self.width,
                                               self.height)
            self.cache[key] = tile_table
            return tile_table
    def _load_tile_table(self, filename, width, height):
        """Load an image and split it into tiles.

        Returns a column-major table: tile_table[x][y].
        """
        try:
            image = pygame.image.load(filename)
        except pygame.error:
            # Fall back to a placeholder so a missing/corrupt asset does
            # not crash the game.  (Was a bare ``except:``, which also
            # swallowed unrelated errors such as KeyboardInterrupt.)
            image = pygame.image.load('missing.png')
        image = image.convert_alpha()
        image_width, image_height = image.get_size()
        tile_table = []
        # Floor division: plain '/' on ints is float division on Python 3,
        # which would make range() raise TypeError.
        for tile_x in range(image_width // width):
            line = []
            tile_table.append(line)
            for tile_y in range(image_height // height):
                rect = (tile_x * width, tile_y * height, width, height)
                line.append(image.subsurface(rect))
        return tile_table
SPRITE_CACHE = TileCache()
MAP_CACHE = TileCache(MAP_TILE_WIDTH, MAP_TILE_HEIGHT)
TILE_CACHE = TileCache(32, 32)
class Sprite(pygame.sprite.Sprite):
    """Sprite for animated items and base class for Player."""
    is_player = False
    def __init__(self, game, item, frames=None):
        """Create a sprite for the given map item.

        :param game: the game object (provides level, score, player)
        :param item: dict describing the item; must contain 'init_pos'
        :param frames: optional pre-loaded frame table
        """
        super(Sprite, self).__init__()
        self.game = game
        self.item = item
        if frames:
            self.frames = frames
        self.image = self.frames[0][0]
        self.rect = self.image.get_rect()
        self.animation = self.stand_animation()
        self.pos = item['init_pos']
        # -1 means "not currently auto-moving"; otherwise an index into
        # the DX/DY direction tables (N, E, S, W).
        self.direction = -1
        self.auto_step = 0
        self.auto_total = 0
    def _get_pos(self):
        """Check the current position of the sprite on the map."""
        # Floor division keeps tile coordinates integral on Python 3
        # (plain '/' would yield floats); identical result on Python 2.
        return (self.rect.midbottom[0] - 12) // 24, \
            (self.rect.midbottom[1] - 32) // 32
    def _set_pos(self, pos):
        """Set the position and depth of the sprite on the map."""
        self.rect.midbottom = pos[0] * 24 + 12, pos[1] * 32 + 32
        self.depth = self.rect.midbottom[1]
    pos = property(_get_pos, _set_pos)
    def auto_move(self):
        """Advance one pixel step of the current auto-movement, if any."""
        if self.direction >= 0:
            self.move(3 * DX[self.direction], 4 * DY[self.direction])
            self.auto_step -= 1
            self.auto_total += 1
            if self.auto_step == 0:
                self.direction = -1
    def move(self, dx, dy):
        """Change the position of the sprite on screen."""
        self.rect.move_ip(dx, dy)
        self.depth = self.rect.midbottom[1]
    def stand_animation(self):
        """The default animation."""
        while True:
            # Change to next frame every two ticks
            for frame in self.frames[0]:
                self.image = frame
                yield None
                self.auto_move()
                yield None
                self.auto_move()
    def update(self, *args):
        """Run the current animation and, for monsters, pick a move."""
        # next() builtin works on Python 2 and 3; the old
        # ``self.animation.next()`` call was Python 2 only.
        next(self.animation)
        if 'monster' in self.item and self.direction == -1:
            d = -1
            p = self.game.level.player
            other_monster = self.game.level.get_item(
                self.pos[0], self.pos[1], 'monster', [self.item])
            if self.pos == p.pos:
                # Sharing a tile with the player: wander randomly.
                d = randint(0, 3)
            elif other_monster is not None:
                # Sharing a tile with another monster: wander randomly.
                d = randint(0, 3)
            elif self.is_adjacent(p):
                # Next to the player: attack instead of moving.
                self.game.score.health -= 1
            elif self.get_distance(p) < 6:
                # Player nearby: chase.
                d = self.get_direction_to(p)
            if d >= 0:
                xnew, ynew = self.pos[0] + DX[d], self.pos[1] + DY[d]
                if not self.game.level.is_blocking(xnew, ynew):
                    self.direction = d
                    self.auto_step = 8
    def get_direction_to(self, other):
        """Return the direction index (N/E/S/W) that closes the larger gap."""
        if abs(other.pos[0] - self.pos[0]) >= abs(other.pos[1] - self.pos[1]):
            if other.pos[0] < self.pos[0]:
                d = 3
            else:
                d = 1
        else:
            if other.pos[1] < self.pos[1]:
                d = 0
            else:
                d = 2
        return d
    def get_distance(self, other):
        """Return the Manhattan distance to another sprite, in tiles."""
        return abs(other.pos[0] - self.pos[0]) + abs(other.pos[1] - self.pos[1])
    def is_adjacent(self, other):
        """Return True when the other sprite is on this or a neighbour tile."""
        if self.get_distance(other) <= 1:
            return True
        else:
            return False
class Player(Sprite):
    """ Display and animate the player character."""
    is_player = True
    def __init__(self, game, item):
        """Create the player sprite facing south (direction 2)."""
        self.frames = SPRITE_CACHE["player.png"]
        Sprite.__init__(self, game, item)
        self.direction = 2
        self.animation = None
        self.image = self.frames[self.direction][0]
    def walk_animation(self):
        """Animation for the player walking."""
        # This animation is hardcoded for 4 frames and 24x32 map tiles
        for frame in range(4):
            self.image = self.frames[self.direction][frame]
            yield None
            self.auto_move()
            yield None
            self.auto_move()
    def update(self, *args):
        """Run the current animation or just stand there if no animation set."""
        if self.animation is None:
            self.image = self.frames[self.direction][0]
        else:
            try:
                # next() builtin works on Python 2 and 3; the old
                # ``self.animation.next()`` call was Python 2 only.
                next(self.animation)
            except StopIteration:
                self.animation = None
class Score(pygame.sprite.Sprite):
    """Heads-up display sprite showing the current score and health."""
    def __init__(self, x, y):
        """Create the HUD at screen position (x, y)."""
        pygame.sprite.Sprite.__init__(self)
        self.x, self.y = x, y
        self.font = pygame.font.Font(None, 20)
        self.color = pygame.Color('white')
        self.background_color = pygame.Color('black')
        # last_* mirror the values rendered most recently, so update()
        # only re-renders when something actually changed.  last_score
        # starts at -1 to force the first render.
        self.last_score = -1
        self.score = 0
        self.last_health = 100
        self.health = 100
        self.rect = pygame.Rect(x, y, 0, 0)
        self.update()
    def update(self):
        """Re-render the HUD text if score or health changed.

        Returns True when a re-render happened.
        """
        update = False
        if self.score != self.last_score:
            self.last_score = self.score
            update = True
        if self.health != self.last_health:
            # Fix: was 'self.last_heath' (typo), so last_health never
            # tracked health and the text re-rendered on every frame
            # after the first health change.
            self.last_health = self.health
            update = True
        if update:
            msg = "Score: %6d Health: %3d" % (self.score, self.health)
            self.image = self.font.render(msg, 0, self.color,
                                          self.background_color)
            self.rect = self.rect.union(self.image.get_rect().move(self.x,
                                                                   self.y))
        return update
| {
"content_hash": "6de690eed97b12bdd27786787c0e4ad8",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 99,
"avg_line_length": 30.665236051502145,
"alnum_prop": 0.5444366689993002,
"repo_name": "mcnalu/dungeon-massacre",
"id": "f246e74e7b33e7637572b69f3e7202c218b7fa21",
"size": "7146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphicsdm.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17278"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "indicator.number"
_path_str = "indicator.number.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
    @color.setter
    def color(self, val):
        # Validation/coercion is delegated to the base class item setter.
        self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Plain-text help for each settable property; consumed by the
        # generated constructor docstring machinery.
        return """\
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        """
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Set the font used to display main number
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.indicator.number.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.indicator.number.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.number.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| {
"content_hash": "da58244e3fa0b04bbed4017ee4064321",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 82,
"avg_line_length": 37.118942731277535,
"alnum_prop": 0.5583906954664135,
"repo_name": "plotly/plotly.py",
"id": "bb2f5acb06d769dc14ae2b3f4f821803239013c9",
"size": "8426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/indicator/number/_font.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""
tests.test_component_group
~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the group compoments.
"""
# pylint: disable=protected-access,too-many-public-methods
import unittest
from homeassistant.const import (
STATE_ON, STATE_OFF, STATE_HOME, STATE_UNKNOWN, ATTR_ICON, ATTR_HIDDEN)
import homeassistant.components.group as group
from tests.common import get_test_home_assistant
class TestComponentsGroup(unittest.TestCase):
    """ Tests homeassistant.components.group module. """
    def setUp(self): # pylint: disable=invalid-name
        """ Init needed objects. """
        # Two lights (one on, one off) plus a pre-made group tracking them.
        self.hass = get_test_home_assistant()
        self.hass.states.set('light.Bowl', STATE_ON)
        self.hass.states.set('light.Ceiling', STATE_OFF)
        test_group = group.Group(
            self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False)
        self.group_entity_id = test_group.entity_id
    def tearDown(self): # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        self.hass.stop()
    def test_setup_group_with_mixed_groupable_states(self):
        """ Try to setup a group with mixed groupable states """
        self.hass.states.set('device_tracker.Paulus', STATE_HOME)
        group.Group(
            self.hass, 'person_and_light',
            ['light.Bowl', 'device_tracker.Paulus'])
        self.assertEqual(
            STATE_ON,
            self.hass.states.get(
                group.ENTITY_ID_FORMAT.format('person_and_light')).state)
    def test_setup_group_with_a_non_existing_state(self):
        """ Try to setup a group with a non existing state """
        grp = group.Group(
            self.hass, 'light_and_nothing',
            ['light.Bowl', 'non.existing'])
        self.assertEqual(STATE_ON, grp.state)
    def test_setup_group_with_non_groupable_states(self):
        """ A group of states with no on/off semantics stays unknown. """
        self.hass.states.set('cast.living_room', "Plex")
        self.hass.states.set('cast.bedroom', "Netflix")
        grp = group.Group(
            self.hass, 'chromecasts',
            ['cast.living_room', 'cast.bedroom'])
        self.assertEqual(STATE_UNKNOWN, grp.state)
    def test_setup_empty_group(self):
        """ Try to setup an empty group. """
        grp = group.Group(self.hass, 'nothing', [])
        self.assertEqual(STATE_UNKNOWN, grp.state)
    def test_monitor_group(self):
        """ Test if the group keeps track of states. """
        # Test if group setup in our init mode is ok
        self.assertIn(self.group_entity_id, self.hass.states.entity_ids())
        group_state = self.hass.states.get(self.group_entity_id)
        self.assertEqual(STATE_ON, group_state.state)
        self.assertTrue(group_state.attributes.get(group.ATTR_AUTO))
    def test_group_turns_off_if_all_off(self):
        """
        Test if the group turns off if the last device that was on turns off.
        """
        self.hass.states.set('light.Bowl', STATE_OFF)
        self.hass.pool.block_till_done()
        group_state = self.hass.states.get(self.group_entity_id)
        self.assertEqual(STATE_OFF, group_state.state)
    def test_group_turns_on_if_all_are_off_and_one_turns_on(self):
        """
        Test if group turns on if all devices were turned off and one turns on.
        """
        # Make sure all are off.
        self.hass.states.set('light.Bowl', STATE_OFF)
        self.hass.pool.block_till_done()
        # Turn one on
        self.hass.states.set('light.Ceiling', STATE_ON)
        self.hass.pool.block_till_done()
        group_state = self.hass.states.get(self.group_entity_id)
        self.assertEqual(STATE_ON, group_state.state)
    def test_is_on(self):
        """ Test is_on method. """
        self.assertTrue(group.is_on(self.hass, self.group_entity_id))
        self.hass.states.set('light.Bowl', STATE_OFF)
        self.hass.pool.block_till_done()
        self.assertFalse(group.is_on(self.hass, self.group_entity_id))
        # Try on non existing state
        self.assertFalse(group.is_on(self.hass, 'non.existing'))
    def test_expand_entity_ids(self):
        """ Test expand_entity_ids method. """
        self.assertEqual(sorted(['light.ceiling', 'light.bowl']),
                         sorted(group.expand_entity_ids(
                             self.hass, [self.group_entity_id])))
    def test_expand_entity_ids_does_not_return_duplicates(self):
        """ Test that expand_entity_ids does not return duplicates. """
        self.assertEqual(
            ['light.bowl', 'light.ceiling'],
            sorted(group.expand_entity_ids(
                self.hass, [self.group_entity_id, 'light.Ceiling'])))
        self.assertEqual(
            ['light.bowl', 'light.ceiling'],
            sorted(group.expand_entity_ids(
                self.hass, ['light.bowl', self.group_entity_id])))
    def test_expand_entity_ids_ignores_non_strings(self):
        """ Test that non string elements in lists are ignored. """
        self.assertEqual([], group.expand_entity_ids(self.hass, [5, True]))
    def test_get_entity_ids(self):
        """ Test get_entity_ids method. """
        self.assertEqual(
            ['light.bowl', 'light.ceiling'],
            sorted(group.get_entity_ids(self.hass, self.group_entity_id)))
    def test_get_entity_ids_with_domain_filter(self):
        """ Test if get_entity_ids works with a domain_filter. """
        self.hass.states.set('switch.AC', STATE_OFF)
        mixed_group = group.Group(
            self.hass, 'mixed_group', ['light.Bowl', 'switch.AC'], False)
        self.assertEqual(
            ['switch.ac'],
            group.get_entity_ids(
                self.hass, mixed_group.entity_id, domain_filter="switch"))
    def test_get_entity_ids_with_non_existing_group_name(self):
        """ Tests get_entity_ids with a non existing group. """
        self.assertEqual([], group.get_entity_ids(self.hass, 'non_existing'))
    def test_get_entity_ids_with_non_group_state(self):
        """ Tests get_entity_ids with a non group state. """
        self.assertEqual([], group.get_entity_ids(self.hass, 'switch.AC'))
    def test_group_being_init_before_first_tracked_state_is_set_to_on(self):
        """ Test if the group turns on if no states existed and now a state it is
            tracking is being added as ON. """
        test_group = group.Group(
            self.hass, 'test group', ['light.not_there_1'])
        self.hass.states.set('light.not_there_1', STATE_ON)
        self.hass.pool.block_till_done()
        group_state = self.hass.states.get(test_group.entity_id)
        self.assertEqual(STATE_ON, group_state.state)
    def test_group_being_init_before_first_tracked_state_is_set_to_off(self):
        """ Test if the group turns off if no states existed and now a state it is
            tracking is being added as OFF. """
        test_group = group.Group(
            self.hass, 'test group', ['light.not_there_1'])
        self.hass.states.set('light.not_there_1', STATE_OFF)
        self.hass.pool.block_till_done()
        group_state = self.hass.states.get(test_group.entity_id)
        self.assertEqual(STATE_OFF, group_state.state)
    def test_setup(self):
        """ Test setup method. """
        # Configure one dict-style group and one shortcut string group.
        self.assertTrue(
            group.setup(
                self.hass,
                {
                    group.DOMAIN: {
                        'second_group': {
                            'entities': 'light.Bowl, ' + self.group_entity_id,
                            'icon': 'mdi:work',
                            'view': True,
                        },
                        'test_group': 'hello.world,sensor.happy',
                    }
                }))
        group_state = self.hass.states.get(
            group.ENTITY_ID_FORMAT.format('second_group'))
        self.assertEqual(STATE_ON, group_state.state)
        self.assertEqual(set((self.group_entity_id, 'light.bowl')),
                         set(group_state.attributes['entity_id']))
        self.assertIsNone(group_state.attributes.get(group.ATTR_AUTO))
        self.assertEqual('mdi:work',
                         group_state.attributes.get(ATTR_ICON))
        self.assertTrue(group_state.attributes.get(group.ATTR_VIEW))
        self.assertTrue(group_state.attributes.get(ATTR_HIDDEN))
        group_state = self.hass.states.get(
            group.ENTITY_ID_FORMAT.format('test_group'))
        self.assertEqual(STATE_UNKNOWN, group_state.state)
        self.assertEqual(set(('sensor.happy', 'hello.world')),
                         set(group_state.attributes['entity_id']))
        self.assertIsNone(group_state.attributes.get(group.ATTR_AUTO))
        self.assertIsNone(group_state.attributes.get(ATTR_ICON))
        self.assertIsNone(group_state.attributes.get(group.ATTR_VIEW))
        self.assertIsNone(group_state.attributes.get(ATTR_HIDDEN))
    def test_groups_get_unique_names(self):
        """ Two groups with same name should both have a unique entity id. """
        grp1 = group.Group(self.hass, 'Je suis Charlie')
        grp2 = group.Group(self.hass, 'Je suis Charlie')
        self.assertNotEqual(grp1.entity_id, grp2.entity_id)
    def test_expand_entity_ids_expands_nested_groups(self):
        """ Test that expand_entity_ids recurses into groups of groups. """
        group.Group(self.hass, 'light', ['light.test_1', 'light.test_2'])
        group.Group(self.hass, 'switch', ['switch.test_1', 'switch.test_2'])
        group.Group(self.hass, 'group_of_groups', ['group.light',
                                                   'group.switch'])
        self.assertEqual(
            ['light.test_1', 'light.test_2', 'switch.test_1', 'switch.test_2'],
            sorted(group.expand_entity_ids(self.hass,
                                           ['group.group_of_groups'])))
| {
"content_hash": "82478f3a2d911417c9876c7b00350d68",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 82,
"avg_line_length": 39.77049180327869,
"alnum_prop": 0.5958367683429514,
"repo_name": "nnic/home-assistant",
"id": "8a3eeadcb145ede287242efda88be3c4fc3840ca",
"size": "9704",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/test_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1482064"
},
{
"name": "Python",
"bytes": "1790232"
},
{
"name": "Shell",
"bytes": "3570"
}
],
"symlink_target": ""
} |
"""
This module is to support *bbox_inches* option in savefig command.
"""
from __future__ import print_function
import warnings
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
def adjust_bbox(fig, format, bbox_inches):
    """
    Temporarily adjust the figure so that only the specified area
    (bbox_inches) is saved.

    Mutates fig.bbox, fig.bbox_inches, fig.transFigure._boxout and
    fig.patch while conserving the scale of the original figure, and
    returns a zero-argument function that restores the original values
    (or None if *format* has no registered handler).
    """
    # Remember everything we are about to clobber.
    saved_bbox = fig.bbox
    saved_bbox_inches = fig.bbox_inches
    saved_boxout = fig.transFigure._boxout
    aspects = []
    locators = []
    for ax in fig.axes:
        frozen_pos = ax.get_position(original=False).frozen()
        locators.append(ax.get_axes_locator())
        aspects.append(ax.get_aspect())

        # Bind the frozen position through a default argument so each
        # axes keeps its own snapshot.
        def _frozen_locator(a, r, pos=frozen_pos):
            return pos
        ax.set_axes_locator(_frozen_locator)
        ax.set_aspect("auto")

    def restore_bbox():
        for ax, aspect, locator in zip(fig.axes, aspects, locators):
            ax.set_aspect(aspect)
            ax.set_axes_locator(locator)
        fig.bbox = saved_bbox
        fig.bbox_inches = saved_bbox_inches
        fig.transFigure._boxout = saved_boxout
        fig.transFigure.invalidate()
        fig.patch.set_bounds(0, 0, 1, 1)

    handler = _adjust_bbox_handler_d.get(format)
    if handler is None:
        warnings.warn("bbox_inches option for %s backend is not "
                      "implemented yet." % (format))
        return None
    handler(fig, bbox_inches)
    return restore_bbox
def adjust_bbox_png(fig, bbox_inches):
    """
    adjust_bbox for png (Agg) format

    Moves the requested region to the origin by shifting
    fig.transFigure._boxout, then rescales fig.patch so it still covers
    the (shrunken) figure.
    """
    tr = fig.dpi_scale_trans
    # Requested region expressed in display (pixel) coordinates.
    _bbox = TransformedBbox(bbox_inches,
                            tr)
    # NOTE: the original code assigned x0, y0 twice with the identical
    # expression; the redundant duplicate has been removed.
    x0, y0 = _bbox.x0, _bbox.y0
    fig.bbox_inches = Bbox.from_bounds(0, 0,
                                       bbox_inches.width,
                                       bbox_inches.height)
    # Old figure size in pixels, captured before fig.bbox is replaced.
    w1, h1 = fig.bbox.width, fig.bbox.height
    fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0,
                                               w1, h1)
    fig.transFigure.invalidate()
    fig.bbox = TransformedBbox(fig.bbox_inches, tr)
    fig.patch.set_bounds(x0 / w1, y0 / h1,
                         fig.bbox.width / w1, fig.bbox.height / h1)
def adjust_bbox_pdf(fig, bbox_inches):
    """
    adjust_bbox for pdf & eps format
    """
    # The pgf renderer works in figure-dpi units; the other vector
    # backends work in points (72 per inch).
    renderer_name = fig._cachedRenderer.__class__.__name__
    if renderer_name == "RendererPgf":
        to_display = Affine2D().scale(fig.dpi)
        scale = 1.
    else:
        to_display = Affine2D().scale(72)
        scale = 72. / fig.dpi
    clipped = TransformedBbox(bbox_inches, to_display)
    fig.bbox_inches = Bbox.from_bounds(0, 0,
                                       bbox_inches.width,
                                       bbox_inches.height)
    x0, y0 = clipped.x0, clipped.y0
    # Old figure size in display units, captured before fig.bbox changes.
    w1, h1 = fig.bbox.width * scale, fig.bbox.height * scale
    fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0,
                                               w1, h1)
    fig.transFigure.invalidate()
    fig.bbox = TransformedBbox(fig.bbox_inches, to_display)
    fig.patch.set_bounds(x0 / w1, y0 / h1,
                         fig.bbox.width / w1, fig.bbox.height / h1)
def process_figure_for_rasterizing(figure,
                                   bbox_inches_restore, mode):
    """
    Called when the figure dpi changes during drawing (e.g. while
    rasterizing): undoes the previous bbox adjustment and re-applies it
    at the new dpi. Returns (bbox_inches, new restore function).
    """
    bbox_inches, restore_bbox = bbox_inches_restore
    restore_bbox()
    new_restore = adjust_bbox(figure, mode,
                              bbox_inches)
    return bbox_inches, new_restore
# Map each output format to its bbox-adjustment handler: raster formats
# share the Agg handler, vector formats share the pdf handler.
# NOTE: the loop variable was renamed from `format` to `_fmt` so the
# builtin `format` is no longer shadowed at module scope.
_adjust_bbox_handler_d = {}
for _fmt in ["png", "raw", "rgba", "jpg", "jpeg", "tiff"]:
    _adjust_bbox_handler_d[_fmt] = adjust_bbox_png
for _fmt in ["pdf", "eps", "svg", "svgz"]:
    _adjust_bbox_handler_d[_fmt] = adjust_bbox_pdf
| {
"content_hash": "56325f977134149cac4a68cb5977e751",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 69,
"avg_line_length": 30.096296296296295,
"alnum_prop": 0.5673147920255969,
"repo_name": "lthurlow/Network-Grapher",
"id": "36c255b9fb922ecb73e8cd557211bda3df698df6",
"size": "4063",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/tight_bbox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6550"
}
],
"symlink_target": ""
} |
from os import remove
from os.path import isfile
from python_utility.configuration import Configuration
def test_set_get_remove() -> None:
    """Exercise the set / contains / get / remove round trip."""
    config = Configuration()
    # a fresh configuration knows nothing about the key
    assert config.contains('my-key') is False
    assert config.get('my-key') == ''
    # the value is stored and retrievable
    config.set('my-key', 'my-value')
    assert config.contains('my-key') is True
    assert config.get('my-key') == 'my-value'
    # removal restores the empty state
    config.remove('my-key')
    assert config.contains('my-key') is False
    assert config.get('my-key') == ''
def test_get_nested() -> None:
    """get_nested returns a dict for nested keys, even non-dict values."""
    config = Configuration('tests/fixture/nested.yaml')
    assert config.contains('foo') is True
    nested = config.get_nested('foo')
    assert isinstance(nested, dict)
    assert nested.get('bar') == 'baz'
    # keys whose value is not a mapping still come back as a dict
    fallback = config.get_nested('foo_no_dict')
    assert isinstance(fallback, dict)
def ensure_file_does_not_exist(file: str) -> None:
    """Delete *file* if it exists and verify it is gone.

    A missing file is not an error; the trailing assert guards against a
    failed deletion. Return annotation added for consistency with the
    other helpers in this module.
    """
    if isfile(file):
        remove(file)
    assert isfile(file) is False
def test_save() -> None:
    """save() creates the backing file; exists() reflects that."""
    file = '/tmp/example.yml'
    ensure_file_does_not_exist(file)
    config = Configuration(file)
    # nothing is written until save() is called
    assert isfile(file) is False
    config.save()
    assert config.exists() is True
    assert isfile(file) is True
    # clean up
    remove(file)
    assert isfile(file) is False
def test_write_and_read() -> None:
    """A value saved through one Configuration is readable by another.

    Return annotation added for consistency with the sibling tests.
    """
    file = '/tmp/example.yml'
    ensure_file_does_not_exist(file)
    # write a value through one instance
    output_file = Configuration(file)
    output_file.set('my-key', 'my-value')
    output_file.save()
    # a fresh instance reads the persisted value back
    input_file = Configuration(file)
    assert input_file.get('my-key') == 'my-value'
    # clean up
    remove(file)
    assert isfile(file) is False
| {
"content_hash": "1685ac046e95d25c020b8ce7693ef967",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 78,
"avg_line_length": 27.424657534246574,
"alnum_prop": 0.6838161838161838,
"repo_name": "FunTimeCoding/python-utility",
"id": "31352e9d0e96e6ef10ead08bf20c5e5fc9283d91",
"size": "2002",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1004"
},
{
"name": "Dockerfile",
"bytes": "932"
},
{
"name": "Python",
"bytes": "34873"
},
{
"name": "SaltStack",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "56817"
},
{
"name": "Smarty",
"bytes": "389"
}
],
"symlink_target": ""
} |
import uuid
from oslo_config import cfg
from oslo_log import log
import tooz.coordination
from aodh.i18n import _LE, _LI
from aodh import utils
LOG = log.getLogger(__name__)
# oslo.config options controlling the tooz-based workload partitioning.
OPTS = [
    cfg.StrOpt('backend_url',
               default=None,
               help='The backend URL to use for distributed coordination. If '
                    'left empty, per-deployment central agent and per-host '
                    'compute agent won\'t do workload '
                    'partitioning and will only function correctly if a '
                    'single instance of that service is running.'),
    cfg.FloatOpt('heartbeat',
                 default=1.0,
                 help='Number of seconds between heartbeats for distributed '
                      'coordination.'),
    cfg.FloatOpt('check_watchers',
                 default=10.0,
                 help='Number of seconds between checks to see if group '
                      'membership has changed')
]
class PartitionCoordinator(object):
    """Workload partitioning coordinator.
    This class uses the `tooz` library to manage group membership.
    To ensure that the other agents know this agent is still alive,
    the `heartbeat` method should be called periodically.
    Coordination errors and reconnects are handled under the hood, so the
    service using the partition coordinator need not care whether the
    coordination backend is down. The `extract_my_subset` will simply return an
    empty iterable in this case.
    """
    def __init__(self, backend_url, my_id=None):
        """Create a coordinator; a random UUID is used when my_id is None."""
        self.backend_url = backend_url
        self._coordinator = None
        self._groups = set()
        self._my_id = my_id or str(uuid.uuid4())
        self._started = False
    def start(self):
        """Connect to the coordination backend, if one is configured."""
        if self.backend_url:
            try:
                self._coordinator = tooz.coordination.get_coordinator(
                    self.backend_url, self._my_id)
                self._coordinator.start()
                self._started = True
                LOG.info(_LI('Coordination backend started successfully.'))
            except tooz.coordination.ToozError:
                self._started = False
                LOG.exception(_LE('Error connecting to coordination backend.'))
    def stop(self):
        """Leave all joined groups and shut the coordinator down."""
        if not self._coordinator:
            return
        for group in list(self._groups):
            self.leave_group(group)
        try:
            self._coordinator.stop()
        except tooz.coordination.ToozError:
            LOG.exception(_LE('Error connecting to coordination backend.'))
        finally:
            self._coordinator = None
            self._started = False
    def is_active(self):
        """Return True while a coordinator object exists."""
        return self._coordinator is not None
    def heartbeat(self):
        """Send a liveness heartbeat, reconnecting first if needed."""
        if self._coordinator:
            if not self._started:
                # re-connect
                self.start()
            try:
                self._coordinator.heartbeat()
            except tooz.coordination.ToozError:
                LOG.exception(_LE('Error sending a heartbeat to coordination '
                                  'backend.'))
    def watch_group(self, namespace, callback):
        """Invoke *callback* when members join or leave *namespace*."""
        if self._coordinator:
            self._coordinator.watch_join_group(namespace, callback)
            self._coordinator.watch_leave_group(namespace, callback)
    def run_watchers(self):
        """Poll the backend and fire any registered watch callbacks."""
        if self._coordinator:
            self._coordinator.run_watchers()
    def join_group(self, group_id):
        """Join *group_id*, creating the group on demand if missing."""
        if not self._coordinator or not self._started or not group_id:
            return
        # Retry until joined: a missing group is created and the join is
        # attempted again; an existing membership is treated as success.
        while True:
            try:
                join_req = self._coordinator.join_group(group_id)
                join_req.get()
                LOG.info(_LI('Joined partitioning group %s'), group_id)
                break
            except tooz.coordination.MemberAlreadyExist:
                return
            except tooz.coordination.GroupNotCreated:
                create_grp_req = self._coordinator.create_group(group_id)
                try:
                    create_grp_req.get()
                except tooz.coordination.GroupAlreadyExist:
                    pass
        self._groups.add(group_id)
    def leave_group(self, group_id):
        """Leave *group_id* if this agent previously joined it."""
        if group_id not in self._groups:
            return
        if self._coordinator:
            self._coordinator.leave_group(group_id)
            self._groups.remove(group_id)
            LOG.info(_LI('Left partitioning group %s'), group_id)
    def _get_members(self, group_id):
        """Return the member list of *group_id*, joining it on demand."""
        if not self._coordinator:
            return [self._my_id]
        while True:
            get_members_req = self._coordinator.get_members(group_id)
            try:
                return get_members_req.get()
            except tooz.coordination.GroupNotCreated:
                self.join_group(group_id)
    def extract_my_subset(self, group_id, iterable):
        """Filters an iterable, returning only objects assigned to this agent.
        We have a list of objects and get a list of active group members from
        `tooz`. We then hash all the objects into buckets and return only
        the ones that hashed into *our* bucket.
        """
        if not group_id:
            return iterable
        if group_id not in self._groups:
            self.join_group(group_id)
        try:
            members = self._get_members(group_id)
            LOG.debug('Members of group: %s', members)
            hr = utils.HashRing(members)
            filtered = [v for v in iterable
                        if hr.get_node(str(v)) == self._my_id]
            LOG.debug('My subset: %s', filtered)
            return filtered
        except tooz.coordination.ToozError:
            LOG.exception(_LE('Error getting group membership info from '
                              'coordination backend.'))
            return []
| {
"content_hash": "643dfa1ecaac58de926e00e7be7e0e5b",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 79,
"avg_line_length": 35.84662576687116,
"alnum_prop": 0.5723087455074448,
"repo_name": "sileht/aodh",
"id": "f77ba370233c36272e1c011af719df5b41b989a2",
"size": "6419",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aodh/coordination.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1061"
},
{
"name": "Python",
"bytes": "671310"
},
{
"name": "Shell",
"bytes": "17843"
}
],
"symlink_target": ""
} |
"""
Regression tests for Aqara AR004.
This device has a non-standard programmable stateless switch service that has a
service-label-index despite not being linked to a service-label.
https://github.com/home-assistant/core/pull/39090
"""
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.helpers import entity_registry as er
from tests.common import assert_lists_same, async_get_device_automations
from tests.components.homekit_controller.common import (
setup_accessories_from_file,
setup_test_accessories,
)
async def test_aqara_switch_setup(hass):
    """Regression test: an Aqara AR004 switch sets up correctly in HA."""
    accessories = await setup_accessories_from_file(hass, "aqara_switch.json")
    await setup_test_accessories(hass, accessories)
    registry = er.async_get(hass)
    battery = registry.async_get("sensor.programmable_switch_battery")
    assert battery.unique_id == "homekit-111a1111a1a111-5"
    # The fixture exposes one battery sensor plus a single button that
    # supports three press patterns.
    expected = [
        {
            "device_id": battery.device_id,
            "domain": "sensor",
            "entity_id": "sensor.programmable_switch_battery",
            "platform": "device",
            "type": "battery_level",
        }
    ]
    expected.extend(
        {
            "device_id": battery.device_id,
            "domain": "homekit_controller",
            "platform": "device",
            "type": "button1",
            "subtype": press,
        }
        for press in ("single_press", "double_press", "long_press")
    )
    triggers = await async_get_device_automations(
        hass, DeviceAutomationType.TRIGGER, battery.device_id
    )
    assert_lists_same(triggers, expected)
| {
"content_hash": "20ca5bdcf7b5f4cf26fe34ca6ae0f852",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 32.280701754385966,
"alnum_prop": 0.653804347826087,
"repo_name": "home-assistant/home-assistant",
"id": "945d950ecc93e4d4fdfc18eb524e1c43ff45ca42",
"size": "1840",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/homekit_controller/specific_devices/test_aqara_switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
'''
This module contains methods that are invoked from the C notifier class.
'''
def read_notification(file):
    """Handle a read event reported by the C notifier for *file* (stub)."""
    pass
def write_notification(file):
    """Handle a write event reported by the C notifier for *file* (stub)."""
    pass
def delete_notification(file):
    """Handle a delete event reported by the C notifier for *file* (stub)."""
    pass
| {
"content_hash": "ed4b0b5455afbd01859962597b7a210f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 71,
"avg_line_length": 16.583333333333332,
"alnum_prop": 0.7236180904522613,
"repo_name": "NucleaPeon/cytify",
"id": "14e3f105fc3694f70b73085c49584d33dbf59fd0",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1062"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
find_xpath_attr,
unified_strdate,
get_element_by_id,
get_element_by_attribute,
int_or_none,
qualities,
)
# There are different sources of video in arte.tv, the extraction process
# is different for each one. The videos usually expire in 7 days, so we can't
# add tests.
class ArteTvIE(InfoExtractor):
    # Extractor for the legacy videos.arte.tv player pages.
    _VALID_URL = r'http://videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
    IE_NAME = 'arte.tv'
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        lang = mobj.group('lang')
        video_id = mobj.group('id')
        # The page's player metadata is served from the do_delegate
        # endpoint as an XML document.
        ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
        ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
        ref_xml_doc = self._download_xml(
            ref_xml_url, video_id, note='Downloading metadata')
        # Pick the <video> node matching the page language, then fetch
        # the player configuration it references.
        config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
        config_xml_url = config_node.attrib['ref']
        config = self._download_xml(
            config_xml_url, video_id, note='Downloading configuration')
        formats = [{
            'format_id': q.attrib['quality'],
            # The playpath starts at 'mp4:', if we don't manually
            # split the url, rtmpdump will incorrectly parse them
            'url': q.text.split('mp4:', 1)[0],
            'play_path': 'mp4:' + q.text.split('mp4:', 1)[1],
            'ext': 'flv',
            'quality': 2 if q.attrib['quality'] == 'hd' else 1,
        } for q in config.findall('./urls/url')]
        self._sort_formats(formats)
        title = config.find('.//name').text
        thumbnail = config.find('.//firstThumbnailUrl').text
        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
class ArteTVPlus7IE(InfoExtractor):
    # Extractor for arte.tv guide pages; subclasses reuse the helpers
    # below for the other arte.tv properties.
    IE_NAME = 'arte.tv:+7'
    _VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
    @classmethod
    def _extract_url_info(cls, url):
        """Return (video_id, lang) parsed from the page URL."""
        mobj = re.match(cls._VALID_URL, url)
        lang = mobj.group('lang')
        # This is not a real id, it can be for example AJT for the news
        # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
        video_id = mobj.group('id')
        return video_id, lang
    def _real_extract(self, url):
        video_id, lang = self._extract_url_info(url)
        webpage = self._download_webpage(url, video_id)
        return self._extract_from_webpage(webpage, video_id, lang)
    def _extract_from_webpage(self, webpage, video_id, lang):
        # The player configuration JSON is referenced either via an
        # arte_vp_url assignment or a data-url attribute.
        json_url = self._html_search_regex(
            [r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'],
            webpage, 'json vp url')
        return self._extract_from_json_url(json_url, video_id, lang)
    def _extract_from_json_url(self, json_url, video_id, lang):
        """Build the info dict from the player's JSON configuration."""
        info = self._download_json(json_url, video_id)
        player_info = info['videoJsonPlayer']
        upload_date_str = player_info.get('shootingDate')
        if not upload_date_str:
            upload_date_str = player_info.get('VDA', '').split(' ')[0]
        title = player_info['VTI'].strip()
        subtitle = player_info.get('VSU', '').strip()
        if subtitle:
            title += ' - %s' % subtitle
        info_dict = {
            'id': player_info['VID'],
            'title': title,
            'description': player_info.get('VDE'),
            'upload_date': unified_strdate(upload_date_str),
            'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
        }
        qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
        formats = []
        for format_id, format_dict in player_info['VSR'].items():
            f = dict(format_dict)
            versionCode = f.get('versionCode')
            langcode = {
                'fr': 'F',
                'de': 'A',
            }.get(lang, lang)
            # Prefer versions matching the page language (VO/VF/VA,
            # optionally with matching subtitles).
            lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
            lang_pref = (
                None if versionCode is None else (
                    10 if any(re.match(r, versionCode) for r in lang_rexs)
                    else -10))
            source_pref = 0
            if versionCode is not None:
                # The original version with subtitles has lower relevance
                if re.match(r'VO-ST(F|A)', versionCode):
                    source_pref -= 10
                # The version with sourds/mal subtitles has also lower relevance
                elif re.match(r'VO?(F|A)-STM\1', versionCode):
                    source_pref -= 9
            # Renamed from `format` so the builtin is not shadowed.
            format_info = {
                'format_id': format_id,
                'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
                'language_preference': lang_pref,
                'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')),
                'width': int_or_none(f.get('width')),
                'height': int_or_none(f.get('height')),
                'tbr': int_or_none(f.get('bitrate')),
                'quality': qfunc(f.get('quality')),
                'source_preference': source_pref,
            }
            if f.get('mediaType') == 'rtmp':
                format_info['url'] = f['streamer']
                format_info['play_path'] = 'mp4:' + f['url']
                format_info['ext'] = 'flv'
            else:
                format_info['url'] = f['url']
            formats.append(format_info)
        self._sort_formats(formats)
        info_dict['formats'] = formats
        return info_dict
# It also uses the arte_vp_url url from the webpage to extract the information
class ArteTVCreativeIE(ArteTVPlus7IE):
    # Only the URL pattern differs from the parent; extraction is
    # inherited from ArteTVPlus7IE unchanged.
    IE_NAME = 'arte.tv:creative'
    _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/(?:magazine?/)?(?P<id>[^?#]+)'
    # Download fixtures exercised by youtube-dl's test harness.
    _TESTS = [{
        'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
        'info_dict': {
            'id': '72176',
            'ext': 'mp4',
            'title': 'Folge 2 - Corporate Design',
            'upload_date': '20131004',
        },
    }, {
        'url': 'http://creative.arte.tv/fr/Monty-Python-Reunion',
        'info_dict': {
            'id': '160676',
            'ext': 'mp4',
            'title': 'Monty Python live (mostly)',
            'description': 'Événement ! Quarante-cinq ans après leurs premiers succès, les légendaires Monty Python remontent sur scène.\n',
            'upload_date': '20140805',
        }
    }]
class ArteTVFutureIE(ArteTVPlus7IE):
    # Extractor for future.arte.tv article anchors; delegates the actual
    # extraction to the inherited _extract_from_webpage helper.
    IE_NAME = 'arte.tv:future'
    _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'

    _TEST = {
        'url': 'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
        'info_dict': {
            'id': '5201',
            'ext': 'mp4',
            'title': 'Les champignons au secours de la planète',
            'upload_date': '20131101',
        },
    }

    def _real_extract(self, url):
        """Locate the article element named by the URL anchor and extract it."""
        anchor_id, lang = self._extract_url_info(url)
        webpage = self._download_webpage(url, anchor_id)
        # Only the HTML element whose id matches the anchor holds the player.
        row = get_element_by_id(anchor_id, webpage)
        return self._extract_from_webpage(row, anchor_id, lang)
class ArteTVDDCIE(ArteTVPlus7IE):
    # Extractor for ddc.arte.tv ("Durch die Nacht" / "Au cœur de la nuit").
    IE_NAME = 'arte.tv:ddc'
    _VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'

    def _real_extract(self, url):
        """Resolve the player JSON URL embedded in a ddc.arte.tv page."""
        video_id, lang = self._extract_url_info(url)
        # The first URL path segment doubles as the language marker:
        # 'folge' pages are German, 'emission' pages are French.
        lang = {'folge': 'de', 'emission': 'fr'}.get(lang, lang)
        webpage = self._download_webpage(url, video_id)
        # The player is injected by a <script> inside the 'visu_video_block'
        # element; that script's source embeds the JSON player URL.
        script_element = get_element_by_attribute('class', 'visu_video_block', webpage)
        script_url = self._html_search_regex(r'src="(.*?)"', script_element, 'script url')
        player_generator = self._download_webpage(
            script_url, video_id, 'Download javascript player generator')
        json_url = self._search_regex(
            r"json_url=(.*)&rendering_place.*", player_generator, 'json url')
        return self._extract_from_json_url(json_url, video_id, lang)
class ArteTVConcertIE(ArteTVPlus7IE):
    # Extractor for concert.arte.tv; extraction logic is fully inherited
    # from ArteTVPlus7IE.
    IE_NAME = 'arte.tv:concert'
    _VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>de|fr)/(?P<id>.+)'

    _TEST = {
        'url': 'http://concert.arte.tv/de/notwist-im-pariser-konzertclub-divan-du-monde',
        'md5': '9ea035b7bd69696b67aa2ccaaa218161',
        'info_dict': {
            'id': '186',
            'ext': 'mp4',
            'title': 'The Notwist im Pariser Konzertclub "Divan du Monde"',
            'upload_date': '20140128',
            'description': 'md5:486eb08f991552ade77439fe6d82c305',
        },
    }
class ArteTVEmbedIE(ArteTVPlus7IE):
    # Extractor for the playerv2 embed pages; the embed URL itself already
    # carries the JSON player URL, language and video id.
    IE_NAME = 'arte.tv:embed'
    _VALID_URL = r'''(?x)
        http://www\.arte\.tv
        /playerv2/embed\.php\?json_url=
        (?P<json_url>
            http://arte\.tv/papi/tvguide/videos/stream/player/
            (?P<lang>[^/]+)/(?P<id>[^/]+)[^&]*
        )
    '''

    def _real_extract(self, url):
        """Extract directly from the JSON URL encoded in the embed URL."""
        # All required pieces are named groups of _VALID_URL; no page
        # download is needed.
        mobj = re.match(self._VALID_URL, url)
        return self._extract_from_json_url(
            mobj.group('json_url'), mobj.group('id'), mobj.group('lang'))
| {
"content_hash": "a5bb4f4dc6428ca46c5c357326dd57ab",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 140,
"avg_line_length": 37.40637450199203,
"alnum_prop": 0.5419107466183832,
"repo_name": "janusnic/youtube-dl-GUI",
"id": "929dd3cc5550beb1b2da8874763084b5146d2f33",
"size": "9415",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "youtube_dl/extractor/arte.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Inno Setup",
"bytes": "7102"
},
{
"name": "Python",
"bytes": "2064276"
}
],
"symlink_target": ""
} |
"""
tcp.py
Thread Criticality Predictor [bhattacharjee2009tcpfdpparmicm].
Keeps track of memory-related CPI component per core.
Defines a SimUtil command so the application can read TCP values per core.
From application, call SimUtil(SIM_USER_TCP, <coreid>) to get a core's absolute criticality metric (0..1000)
"""
import sim
SIM_USER_TCP = 0x0be00001
INTERVAL = 100000 # in ns
ncores = sim.config.ncores
CPI_MEM = (
"cpiDataCacheL2", "cpiDataCacheL2_S", "cpiDataCacheL3", "cpiDataCacheL3_S",
"cpiDataCachecache-remote", "cpiDataCachedram-local", "cpiDataCachedram-remote",
"cpiDataCacheunknown",
)
class Tcp:
  """Thread Criticality Predictor.

  Each interval, accumulates the memory-related CPI components reported by
  the interval timer for every core, and exposes the resulting criticality
  metric to the simulated application via the SIM_USER_TCP SimUtil command.
  """

  def setup(self, args):
    # Framework entry point; `args` is unused here.
    self.sd = sim.util.StatsDelta()
    # Per-core delta getters: each getter's .delta is the change over the
    # most recent statsdelta update.
    self.stats = {
      'time': [ self.sd.getter("performance_model", core, "elapsed_time") for core in range(ncores) ],
      'instrs': [ self.sd.getter("performance_model", core, "instruction_count") for core in range(ncores) ],
      'cpimem': [ [ self.sd.getter("interval_timer", core, cpi) for cpi in CPI_MEM ] for core in range(ncores) ],
    }
    # Latest criticality value per core; None until the first interval fires.
    self.tcp = [ None for core in range(ncores) ]
    # Recompute every INTERVAL ns, only while inside the ROI.
    sim.util.Every(INTERVAL * sim.util.Time.NS, self.periodic, statsdelta = self.sd, roi_only = True)
    sim.util.register_command(SIM_USER_TCP, self.get_tcp)

  def periodic(self, time, time_delta):
    # Periodic callback: refresh every core's criticality metric.
    for core in range(ncores):
      cycles = self.stats['time'][core].delta * sim.dvfs.get_frequency(core) / 1e9 # convert fs to cycles
      instrs = self.stats['instrs'][core].delta
      # Sum of all memory-related CPI component deltas for this interval.
      cpimem = sum([ c.delta for c in self.stats['cpimem'][core] ])
      # NOTE(review): `cycles` and `instrs` are computed but never used; the
      # metric below normalizes cpimem by wall time (time_delta), not by
      # cycles -- confirm whether that is the intended definition.
      self.tcp[core] = int(1000 * cpimem / time_delta)

  def get_tcp(self, core_caller, arg):
    # SimUtil handler: SimUtil(SIM_USER_TCP, <coreid>) -> metric, or None
    # for an out-of-range core id.
    core = arg
    if 0 <= core < ncores:
      return self.tcp[core]
    else:
      return None
sim.util.register(Tcp())
| {
"content_hash": "f9a41936600c3fa97e5c91bbfb9fc8e4",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 113,
"avg_line_length": 35.24,
"alnum_prop": 0.6793416572077186,
"repo_name": "yonggang985/Sniper",
"id": "b02d9b4aa47369b51e60b7b012b3ea839121b75b",
"size": "1762",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/tcp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "90653"
},
{
"name": "C++",
"bytes": "1722452"
},
{
"name": "Makefile",
"bytes": "21654"
},
{
"name": "Objective-C",
"bytes": "645"
},
{
"name": "Python",
"bytes": "103923"
}
],
"symlink_target": ""
} |
from vtk import *
import os.path
# Set database parameters
data_dir = "../../../../VTKData/Data/Infovis/SQLite/"
if not os.path.exists( data_dir):
data_dir = "../../../../../VTKData/Data/Infovis/SQLite/"
if not os.path.exists( data_dir):
data_dir = "../../../../../../VTKData/Data/Infovis/SQLite/"
sqlite_file = data_dir + "temperatures.db"
databaseToTable = vtkSQLDatabaseTableSource()
databaseToTable.SetURL("sqlite://" + sqlite_file)
# Pull the first data set from the database
databaseToTable.SetQuery("select * from main_tbl where CompId==2")
# Calculate primary descriptive statistics for first batch
print "# Calculate primary model of descriptive statistics for first data set:"
ds1 = vtkDescriptiveStatistics()
ds1.AddInputConnection( databaseToTable.GetOutputPort() )
ds1.AddColumn("Temp1")
ds1.AddColumn("Temp2")
ds1.SetLearnOption( 1 )
ds1.SetDeriveOption( 0 )
ds1.SetAssessOption( 0 )
ds1.SetTestOption( 0 )
ds1.Update()
# Show primary descriptive statistics for first batch
dStats1 = ds1.GetOutputDataObject( 1 )
dPrimary1 = dStats1.GetBlock( 0 )
dPrimary1.Dump( 15 )
print
# Pull the second data set from the database
databaseToTable.SetQuery("select * from main_tbl where CompId==3")
# Calculate primary descriptive statistics for second batch
print "# Calculate primary model of descriptive statistics for second data set:"
ds2 = vtkDescriptiveStatistics()
ds2.AddInputConnection( databaseToTable.GetOutputPort() )
ds2.AddColumn("Temp1")
ds2.AddColumn("Temp2")
ds2.SetLearnOption( 1 )
ds2.SetDeriveOption( 0 )
ds2.SetAssessOption( 0 )
ds2.SetTestOption( 0 )
ds2.Update()
# Show primary descriptive statistics for second batch
dStats2 = ds2.GetOutputDataObject( 1 )
dPrimary2 = dStats2.GetBlock( 0 )
dPrimary2.Dump( 15 )
print
# Finally aggregate both models to get a new primary model for the whole ensemble
print "# Aggregate both primary models:"
collection = vtkDataObjectCollection()
collection.AddItem( dStats1 )
collection.AddItem( dStats2 )
ds = vtkDescriptiveStatistics()
aggregated = vtkMultiBlockDataSet()
ds.Aggregate( collection, aggregated )
dPrimary = aggregated.GetBlock( 0 )
dPrimary.Dump( 15 )
print
# Calculate derived model for whole ensemble
print "# Now calculating derived statistics for whole ensemble:"
ds.SetInputData( 2, aggregated )
ds.SetLearnOption( 0 )
ds.SetDeriveOption( 1 )
ds.SetAssessOption( 0 )
ds.SetTestOption( 0 )
ds.Update()
dStats = ds.GetOutputDataObject( 1 )
dDerived = dStats.GetBlock( 1 )
dDerived.Dump( 15 )
print
# Pull entire data set from the database
databaseToTable.SetQuery("select * from main_tbl")
# Verify with calculation for whole ensemble at once
print "# Finally verifying by directly calculating statistics for whole ensemble:"
ds0 = vtkDescriptiveStatistics()
ds0.AddInputConnection( databaseToTable.GetOutputPort() )
ds0.AddColumn("Temp1")
ds0.AddColumn("Temp2")
ds0.SetLearnOption( 1 )
ds0.SetDeriveOption( 1 )
ds0.SetAssessOption( 0 )
ds0.SetTestOption( 0 )
ds0.Update()
# Show all descriptive statistics for whole ensemble
dStats0 = ds0.GetOutputDataObject( 1 )
dPrimary0 = dStats0.GetBlock( 0 )
dPrimary0.Dump( 15 )
dDerived0 = dStats0.GetBlock( 1 )
dDerived0.Dump( 15 )
print
| {
"content_hash": "ebd5b5a4d624c6c851cbb6a7a09bf2d6",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 82,
"avg_line_length": 31.147058823529413,
"alnum_prop": 0.7645577588920365,
"repo_name": "ashray/VTK-EVM",
"id": "a2220e49d1730343ad74d880e322f86eae894a5a",
"size": "3199",
"binary": false,
"copies": "5",
"ref": "refs/heads/yiq",
"path": "Examples/Infovis/Python/aggregate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "46581039"
},
{
"name": "C++",
"bytes": "68029728"
},
{
"name": "CMake",
"bytes": "1565141"
},
{
"name": "CSS",
"bytes": "186729"
},
{
"name": "Cuda",
"bytes": "29062"
},
{
"name": "D",
"bytes": "2081"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "GLSL",
"bytes": "184514"
},
{
"name": "Groff",
"bytes": "65394"
},
{
"name": "HTML",
"bytes": "286597"
},
{
"name": "Java",
"bytes": "196895"
},
{
"name": "JavaScript",
"bytes": "1111664"
},
{
"name": "Lex",
"bytes": "45258"
},
{
"name": "Objective-C",
"bytes": "68753"
},
{
"name": "Objective-C++",
"bytes": "257535"
},
{
"name": "Pascal",
"bytes": "3255"
},
{
"name": "Perl",
"bytes": "173168"
},
{
"name": "Prolog",
"bytes": "4406"
},
{
"name": "Python",
"bytes": "15938368"
},
{
"name": "Shell",
"bytes": "62526"
},
{
"name": "Smarty",
"bytes": "1325"
},
{
"name": "Tcl",
"bytes": "1888786"
},
{
"name": "Yacc",
"bytes": "174503"
}
],
"symlink_target": ""
} |
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Build and fit a hetero linear-regression pipeline with stepwise selection.

    config    -- path to a job config file, or an already-loaded config object
    namespace -- suffix appended to the data namespace (for test isolation)
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    # Uploaded example tables for the guest and host parties.
    guest_train_data = {"name": "motor_hetero_mini_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "motor_hetero_mini_host", "namespace": f"experiment{namespace}"}

    # The guest initiates the job; guest, host and arbiter all participate.
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    # Only the guest side holds the regression label ("motor_speed").
    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(
        role='guest',
        party_id=guest).component_param(
        with_label=True,
        output_format="dense",
        label_name="motor_speed",
        label_type="float",
    )
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # Private-set intersection aligns samples before training; the linear
    # regression runs backward stepwise selection scored by AIC.
    intersection_0 = Intersection(name="intersection_0")
    hetero_linr_0 = HeteroLinR(name="hetero_linr_0", early_stop="diff", max_iter=3,
                               penalty="None", optimizer="sgd", tol=0.001,
                               alpha=0.01, batch_size=-1, learning_rate=0.15,
                               decay=0.0, decay_sqrt=False,
                               init_param={"init_method": "zeros"},
                               stepwise_param={"score_name": "AIC", "direction": "backward",
                                               "need_stepwise": True, "max_step": 3, "nvmin": 2
                                               })

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(hetero_linr_0, data=Data(train_data=intersection_0.output.data))

    pipeline.compile()
    pipeline.fit()

    # print(pipeline.get_component("hetero_linr_0").get_summary())
if __name__ == "__main__":
    # Parse an optional "-config <path>" argument; fall back to the
    # default config path baked into main() when it is absent.
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is None:
        main()
    else:
        main(args.config)
| {
"content_hash": "a06abd977c6efb18bf52259a030449c8",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 120,
"avg_line_length": 40.7887323943662,
"alnum_prop": 0.6301795580110497,
"repo_name": "FederatedAI/FATE",
"id": "9b404c8922544bc6a948a1755173ad43b0b8b083",
"size": "3513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pipeline/hetero_stepwise/pipeline-stepwise-linr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
} |
"""Python client for pw_transfer integration test."""
import logging
import socket
import sys
from google.protobuf import text_format
from pw_hdlc.rpc import HdlcRpcClient, default_channels
from pw_status import Status
import pw_transfer
from pigweed.pw_transfer import transfer_pb2
from pigweed.pw_transfer.integration_test import config_pb2
# Module logger; everything goes to stdout so the test harness can capture it.
_LOG = logging.getLogger('pw_transfer_integration_test_python_client')
_LOG.level = logging.DEBUG
_LOG.addHandler(logging.StreamHandler(sys.stdout))

# The transfer server always runs on the local host; only the port varies.
HOSTNAME: str = "localhost"
def _main() -> int:
    """Run the transfer actions described by a ClientConfig read from stdin.

    Command line: a single argument, the TCP port of the transfer server
    (the host is always ``HOSTNAME``).

    Returns a process exit code: 0 when every configured transfer completed
    as expected, 1 otherwise.
    """
    if len(sys.argv) != 2:
        _LOG.critical("Usage: PORT")
        return 1

    # The port is passed via the command line.
    try:
        port = int(sys.argv[1])
    except ValueError:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        _LOG.critical("Invalid port specified.")
        return 1

    # Load the config from stdin.
    try:
        text_config = sys.stdin.buffer.read()
        config = text_format.Parse(text_config, config_pb2.ClientConfig())
    except Exception as e:
        _LOG.critical("Failed to parse config file from stdin: %s", e)
        return 1

    # Open a connection to the server.
    try:
        rpc_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        rpc_socket.connect((HOSTNAME, port))
    except OSError:
        _LOG.critical("Failed to connect to server at %s:%d", HOSTNAME, port)
        return 1

    # Initialize an RPC client over the socket and set up the pw_transfer manager.
    rpc_client = HdlcRpcClient(
        lambda: rpc_socket.recv(4096), [transfer_pb2],
        default_channels(lambda data: rpc_socket.sendall(data)),
        lambda data: _LOG.info("%s", str(data)))
    transfer_service = rpc_client.rpcs().pw.transfer.Transfer
    transfer_manager = pw_transfer.Manager(
        transfer_service,
        default_response_timeout_s=config.chunk_timeout_ms / 1000,
        initial_response_timeout_s=config.initial_chunk_timeout_ms / 1000,
        max_retries=config.max_retries,
        default_protocol_version=pw_transfer.ProtocolVersion.LATEST,
    )

    # Perform the requested transfer actions.
    for action in config.transfer_actions:
        protocol_version = pw_transfer.ProtocolVersion(
            int(action.protocol_version))

        # Default to the latest protocol version if none is specified.
        if protocol_version == pw_transfer.ProtocolVersion.UNKNOWN:
            protocol_version = pw_transfer.ProtocolVersion.LATEST
        if (action.transfer_type ==
                config_pb2.TransferAction.TransferType.WRITE_TO_SERVER):
            try:
                with open(action.file_path, 'rb') as f:
                    data = f.read()
            except OSError:
                _LOG.critical("Failed to read input file '%s'",
                              action.file_path)
                return 1

            try:
                transfer_manager.write(action.resource_id,
                                       data,
                                       protocol_version=protocol_version)
            except pw_transfer.client.Error as e:
                # A transfer error is acceptable only when the test expects
                # exactly that status.
                if e.status != Status(action.expected_status):
                    _LOG.exception(
                        "Unexpected error encountered during write transfer")
                    return 1
            except Exception:
                _LOG.exception("Transfer (write to server) failed")
                return 1
        elif (action.transfer_type ==
              config_pb2.TransferAction.TransferType.READ_FROM_SERVER):
            try:
                data = transfer_manager.read(action.resource_id,
                                             protocol_version=protocol_version)
            except pw_transfer.client.Error as e:
                if e.status != Status(action.expected_status):
                    _LOG.exception(
                        "Unexpected error encountered during read transfer")
                    return 1
                # Expected failure: nothing was read, so skip the file write.
                continue
            except Exception:
                _LOG.exception("Transfer (read from server) failed")
                return 1

            try:
                with open(action.file_path, 'wb') as f:
                    f.write(data)
            except OSError:
                _LOG.critical("Failed to write output file '%s'",
                              action.file_path)
                return 1
        else:
            _LOG.critical("Unknown transfer type: %d", action.transfer_type)
            return 1

    _LOG.info("All transfers completed successfully")
    return 0
if __name__ == '__main__':
    # Propagate the client's exit code to the calling shell/test harness.
    sys.exit(_main())
| {
"content_hash": "ae9ca560a74aa75902386a2167c64be2",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 82,
"avg_line_length": 36.152,
"alnum_prop": 0.5817658774065059,
"repo_name": "google/pigweed",
"id": "8bb4ec4f7aea31368504273daade72807452eb05",
"size": "5126",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pw_transfer/integration_test/python_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8654"
},
{
"name": "C",
"bytes": "487991"
},
{
"name": "C++",
"bytes": "6119052"
},
{
"name": "CMake",
"bytes": "288698"
},
{
"name": "CSS",
"bytes": "4820"
},
{
"name": "Go",
"bytes": "18932"
},
{
"name": "HTML",
"bytes": "1194"
},
{
"name": "Java",
"bytes": "327548"
},
{
"name": "JavaScript",
"bytes": "12482"
},
{
"name": "Jinja",
"bytes": "2467"
},
{
"name": "Python",
"bytes": "3578966"
},
{
"name": "Rust",
"bytes": "645"
},
{
"name": "SCSS",
"bytes": "1382"
},
{
"name": "Shell",
"bytes": "22974"
},
{
"name": "Smarty",
"bytes": "692"
},
{
"name": "Starlark",
"bytes": "489444"
},
{
"name": "TypeScript",
"bytes": "235169"
}
],
"symlink_target": ""
} |
from PySide import QtGui, QtCore
import sys
import time
from traits.api import HasTraits, Instance, on_trait_change, Range
from traitsui.api import View, Item
from mayavi.core.ui.api import MayaviScene, MlabSceneModel, \
SceneEditor
# Create the data.
from sympy import sympify
from predefined_surfaces import *
# Local-namespace dict handed to sympify() so that u and v stay symbolic
# (both come from the predefined_surfaces star import above).
D={'v':v,'u':u}
# Module-global handle to the surface currently displayed; reassigned by the
# GUI callbacks in MainWindow.
onScreen=Torus
################################################################################
################################################################################
#The actual visualization
class Visualization(HasTraits):
    """Traits model owning the Mayavi scene; redraws the global `onScreen`
    surface colored by its Gaussian curvature."""
    scene = Instance(MlabSceneModel, ())
    change=Range(0.0, 100.0, value=1.0)

    @on_trait_change('change')
    def update_plot(self):
        """Clear the scene and redraw `onScreen` with a curvature colormap."""
        # This function is called when the view is opened. We don't
        # populate the scene when the view is not yet open, as some
        # VTK features require a GLContext.
        # We can do normal mlab calls on the embedded scene.
        # todo use set from mlab
        self.scene.mlab.clf() # clear scene
        # Recompute the surface mesh and its curvature arrays.
        onScreen.cal()
        try:
            self.scene.mlab.mesh(onScreen.X, onScreen.Y, onScreen.Z,scalars=onScreen.Gauss_Curvature)
            # add the colorbar
            self.scene.mlab.colorbar(orientation='horizontal',title='Gaussian Curvature')
        except AssertionError: # in some cases mesh() rejects the scalars;
            # fall back to plotting the surface in a single color.
            # TODO: minimize these cases
            print 'error'
            self.scene.mlab.mesh(onScreen.X, onScreen.Y, onScreen.Z,color=(0,1,0))
        # adjust camera
        self.scene.mlab.view(azimuth=45, elevation=60, distance=30)
        #print 'scene updated'

    # the layout of the dialog created
    view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
                     height=250, width=400, show_label=False),
                resizable=True # We need this to resize with the parent widget
                )
################################################################################
# The QWidget containing the visualization, this is pure PyQt4 code.
class MayaviQWidget(QtGui.QWidget):
    """Qt widget that embeds the Mayavi `Visualization` scene."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        # Borderless layout so the scene fills the whole widget.
        layout = QtGui.QVBoxLayout(self)
        layout.setContentsMargins(0,0,0,0)
        layout.setSpacing(0)
        self.visualization = Visualization()

        # The edit_traits call will generate the widget to embed.
        self.ui = self.visualization.edit_traits(parent=self,
                kind='subpanel').control
        layout.addWidget(self.ui)
        self.ui.setParent(self)
################################################################################
# The main Widget containing all the others
class MainWindow(QtGui.QMainWindow):
    """Main application window.

    Left side: surface-selection controls (predefined surfaces via a combo
    box, or custom parametric formulas X/Y/Z(u,v) with u/v ranges).
    Right side: the embedded Mayavi scene.
    """

    def __init__(self):
        """Build the window, its central widget and the signal connections."""
        QtGui.QMainWindow.__init__(self)
        self.setWindowTitle("Exploring the Gaussian Curvature")
        #self.setWindowIcon(QIcon('appicon.png'))
        self.resize(800, 600)
        self.center()
        self.StatusBar = QtGui.QStatusBar()
        self.setStatusBar(self.StatusBar)
        self.StatusBar.showMessage('Ready')
        self.MainWidget = QtGui.QWidget()
        self.create_MainWidget()
        self.setCentralWidget(self.MainWidget)
        self.connections()

    def create_MainWidget(self):
        """Create the input form, the u/v range grid and the Mayavi widget."""
        self.MainWidget.MyLayout = QtGui.QHBoxLayout()
        # Combo box with the predefined example surfaces.
        self.MainWidget.combo = QtGui.QComboBox()
        self.MainWidget.combo.addItem("Torus")
        self.MainWidget.combo.addItem("Helix")
        self.MainWidget.combo.addItem("Ellipsoid")
        self.MainWidget.combo.addItem("Elliptic Paraboloid")
        self.MainWidget.combo.addItem("Hyperboloid of one sheet")
        # Bug fix: this entry was added twice, producing a duplicate menu
        # item. NOTE(review): it still has no branch in onComboActivated,
        # so selecting it redraws the previous surface.
        self.MainWidget.combo.addItem("Hyperboloids of two sheets")
        #self.MainWidget.ApplyButton.clicked.connect(self.MainWidget.onApply)
        self.MainWidget.checkbox = QtGui.QCheckBox("Custom Surface")
        # Controls that are enabled only while "Custom Surface" is checked.
        self.MainWidget.custom_controls = []

        # Input layout: formula line edits for X/Y/Z(u,v) plus Apply button.
        self.MainWidget.InputLayout = QtGui.QFormLayout()
        self.MainWidget.XLabel = QtGui.QLabel()
        self.MainWidget.XLabel.setText("X(u,v)")
        self.MainWidget.custom_controls.append(self.MainWidget.XLabel)
        self.MainWidget.XFormula = QtGui.QLineEdit()
        self.MainWidget.custom_controls.append(self.MainWidget.XFormula)
        self.MainWidget.YLabel = QtGui.QLabel()
        self.MainWidget.YLabel.setText("Y(u,v)")
        self.MainWidget.custom_controls.append(self.MainWidget.YLabel)
        self.MainWidget.YFormula = QtGui.QLineEdit()
        self.MainWidget.custom_controls.append(self.MainWidget.YFormula)
        self.MainWidget.ZLabel = QtGui.QLabel()
        self.MainWidget.ZLabel.setText("Z(u,v)")
        self.MainWidget.custom_controls.append(self.MainWidget.ZLabel)
        self.MainWidget.ZFormula = QtGui.QLineEdit()
        self.MainWidget.custom_controls.append(self.MainWidget.ZFormula)
        self.MainWidget.Formulas = [self.MainWidget.XFormula, self.MainWidget.YFormula, self.MainWidget.ZFormula]
        self.MainWidget.ApplyButton = QtGui.QPushButton()
        self.MainWidget.ApplyButton.setText("Apply")
        self.MainWidget.custom_controls.append(self.MainWidget.ApplyButton)
        self.MainWidget.InputLayout.addRow(self.MainWidget.combo)
        self.MainWidget.InputLayout.addRow(self.MainWidget.checkbox)
        self.MainWidget.InputLayout.addRow(self.MainWidget.XLabel, self.MainWidget.XFormula)
        self.MainWidget.InputLayout.addRow(self.MainWidget.YLabel, self.MainWidget.YFormula)
        self.MainWidget.InputLayout.addRow(self.MainWidget.ZLabel, self.MainWidget.ZFormula)
        self.MainWidget.InputLayout.addRow(self.MainWidget.ApplyButton)

        # u-v ranges grid ("u: from <min> to <max>" / "v: from <min> to <max>").
        self.MainWidget.InputRangeLayout = QtGui.QGridLayout()
        self.MainWidget.uLabel = QtGui.QLabel()
        self.MainWidget.uLabel.setText("u: from ")
        self.MainWidget.InputRangeLayout.addWidget(self.MainWidget.uLabel, 0, 0)
        self.MainWidget.custom_controls.append(self.MainWidget.uLabel)
        self.MainWidget.umin = QtGui.QLineEdit()
        self.MainWidget.InputRangeLayout.addWidget(self.MainWidget.umin, 0, 1)
        self.MainWidget.custom_controls.append(self.MainWidget.umin)
        self.MainWidget.u_toLabel = QtGui.QLabel()
        self.MainWidget.u_toLabel.setText("to")
        self.MainWidget.InputRangeLayout.addWidget(self.MainWidget.u_toLabel, 0, 2)
        self.MainWidget.custom_controls.append(self.MainWidget.u_toLabel)
        self.MainWidget.umax = QtGui.QLineEdit()
        self.MainWidget.InputRangeLayout.addWidget(self.MainWidget.umax, 0, 3)
        self.MainWidget.custom_controls.append(self.MainWidget.umax)
        self.MainWidget.vLabel = QtGui.QLabel()
        self.MainWidget.vLabel.setText("v: from ")
        self.MainWidget.InputRangeLayout.addWidget(self.MainWidget.vLabel, 1, 0)
        self.MainWidget.custom_controls.append(self.MainWidget.vLabel)
        self.MainWidget.vmin = QtGui.QLineEdit()
        self.MainWidget.InputRangeLayout.addWidget(self.MainWidget.vmin, 1, 1)
        self.MainWidget.custom_controls.append(self.MainWidget.vmin)
        self.MainWidget.v_toLabel = QtGui.QLabel()
        self.MainWidget.v_toLabel.setText("to")
        self.MainWidget.InputRangeLayout.addWidget(self.MainWidget.v_toLabel, 1, 2)
        self.MainWidget.custom_controls.append(self.MainWidget.v_toLabel)
        self.MainWidget.vmax = QtGui.QLineEdit()
        self.MainWidget.InputRangeLayout.addWidget(self.MainWidget.vmax, 1, 3)
        self.MainWidget.custom_controls.append(self.MainWidget.vmax)
        self.MainWidget.u_v_range = [self.MainWidget.umin, self.MainWidget.umax, self.MainWidget.vmin, self.MainWidget.vmax]
        #self.MainWidget.onCheckbox() #set the initial state of the custom controls
        self.MainWidget.InputLayout.addRow(self.MainWidget.InputRangeLayout)
        self.MainWidget.MyLayout.addLayout(self.MainWidget.InputLayout)

        # Embed the Mayavi widget to the right of the controls.
        self.MainWidget.mayavi_widget = MayaviQWidget()
        self.MainWidget.MyLayout.addWidget(self.MainWidget.mayavi_widget, 4)
        self.MainWidget.setLayout(self.MainWidget.MyLayout)

    def connections(self):
        """Wire up signals, then initialize the custom-controls state."""
        self.MainWidget.combo.activated.connect(self.onComboActivated)
        self.MainWidget.checkbox.stateChanged.connect(self.onCheckbox)
        self.MainWidget.ApplyButton.clicked.connect(self.onApply)
        self.onCheckbox()

    def center(self):
        """Center the window on the available screen area."""
        # http://zetcode.com/gui/pysidetutorial/firstprograms/
        qr = self.frameGeometry()
        cp = QtGui.QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def writeFormulas(self, Surface1):
        """Fill the formula and range line edits from a Surface object."""
        for widget, formula in zip(self.MainWidget.Formulas, Surface1.S):
            widget.setText(str(formula))
        for widget, bound in zip(self.MainWidget.u_v_range, Surface1.u_v_range):
            widget.setText(str(bound))

    def onComboActivated(self):
        """Display the predefined surface chosen in the combo box."""
        global onScreen
        text = self.MainWidget.combo.currentText()
        if text == "Helix":
            onScreen = Helix
        elif text == "Hyperboloid of one sheet":
            onScreen = Hyperboloid_one_sheet
        elif text == "Torus":
            onScreen = Torus
        elif text == "Ellipsoid":
            onScreen = Ellipsoid
        elif text == "Elliptic Paraboloid":
            onScreen = Elliptic_Paraboloid
        self.StatusBar.showMessage("Processing...")
        # NOTE(review): this blocks the GUI thread for 4 seconds; it looks
        # like a debugging leftover -- confirm before removing.
        time.sleep(4)
        self.writeFormulas(onScreen)
        self.MainWidget.mayavi_widget.visualization.update_plot()
        self.StatusBar.showMessage("Ready!")

    def onCheckbox(self):
        """Toggle between the predefined combo box and the custom controls."""
        if self.MainWidget.checkbox.isChecked():
            self.MainWidget.combo.setDisabled(True)
            for i in self.MainWidget.custom_controls:
                i.setEnabled(True)
        else:
            self.MainWidget.combo.setEnabled(True)
            for i in self.MainWidget.custom_controls:
                i.setDisabled(True)

    def onApply(self):
        """Build a Surface from the custom formulas/ranges and display it."""
        global onScreen
        self.StatusBar.showMessage("Processing...")
        # sympify with the D namespace keeps u and v symbolic.
        onScreen = Surface([sympify(self.MainWidget.XFormula.text(), D),
                            sympify(self.MainWidget.YFormula.text(), D),
                            sympify(self.MainWidget.ZFormula.text(), D)],
                           umin=sympify(self.MainWidget.umin.text()),
                           umax=sympify(self.MainWidget.umax.text()),
                           vmin=sympify(self.MainWidget.vmin.text()),
                           vmax=sympify(self.MainWidget.vmax.text()))
        self.MainWidget.mayavi_widget.visualization.update_plot()
        self.StatusBar.showMessage("Ready!")
if __name__ == '__main__':
    # Exception Handling
    try:
        # NOTE(review): relies on a QApplication created elsewhere;
        # .instance() returns None if none exists (creation is commented out).
        #app = QtGui.QApplication(sys.argv)
        app = QtGui.QApplication.instance()
        myWidget = MainWindow()
        myWidget.show()
        app.exec_()
        sys.exit(0)
    except NameError:
        print("Name Error:", sys.exc_info()[1])
    except SystemExit:
        # Raised by the sys.exit(0) above on a clean shutdown.
        print("Closing Window...")
    except Exception:
        print(sys.exc_info()[1])
    # Bug fix: a trailing `except Error:` clause was removed -- `Error` is
    # undefined here, and the clause was unreachable after `except Exception`.
| {
"content_hash": "55172f268a8efcc5791b479108529ee7",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 119,
"avg_line_length": 40.00651465798045,
"alnum_prop": 0.6187103077674646,
"repo_name": "ChristosT/colour-surfaces",
"id": "f6572ab2157a45bd8af52441eea2a8e63c8f7197",
"size": "12282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Mayavi_app_Gauss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14288"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lemmatizer import LOOKUP
from .stop_words import STOP_WORDS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups
class TurkishDefaults(Language.Defaults):
    """Language defaults for Turkish ('tr')."""
    # Copy the shared getters so mutating them doesn't affect other languages.
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: 'tr'
    # NORM lookups fall back to the shared base norm exceptions.
    lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM], BASE_NORMS)

    # Turkish-specific tokenizer exceptions layered over the base set.
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    stop_words = STOP_WORDS
    lemma_lookup = LOOKUP
class Turkish(Language):
    """Turkish language class (ISO 639-1 code 'tr')."""
    lang = 'tr'
    Defaults = TurkishDefaults
__all__ = ['Turkish']
| {
"content_hash": "0df167a329fedd68139cd213567a875d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 94,
"avg_line_length": 29.103448275862068,
"alnum_prop": 0.7476303317535545,
"repo_name": "recognai/spaCy",
"id": "9d1eedfe40660595ed26ba29a203f984c3025bbf",
"size": "859",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "spacy/lang/tr/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103274"
},
{
"name": "C++",
"bytes": "161734"
},
{
"name": "CSS",
"bytes": "42943"
},
{
"name": "HTML",
"bytes": "904913"
},
{
"name": "JavaScript",
"bytes": "17993"
},
{
"name": "Python",
"bytes": "199094758"
},
{
"name": "Shell",
"bytes": "1091"
}
],
"symlink_target": ""
} |
from openerp.osv import osv, fields
class res_partner(osv.Model):
    """Extends res.partner with a stored, sortable display name."""
    _inherit = 'res.partner'
    # Sort partners by the stored display name instead of the raw name.
    _order = 'display_name'

    def _display_name_compute(self, cr, uid, ids, name, args, context=None):
        # Compute the display name from name_get(), without the address
        # portion that 'show_address' would append.
        context = dict(context or {})
        context.pop('show_address', None)
        return dict(self.name_get(cr, uid, ids, context=context))

    # Recompute the stored value for a partner and all its children whenever
    # parent/company/name changes (name_get output depends on all three).
    _display_name_store_triggers = {
        'res.partner': (lambda self,cr,uid,ids,context=None: self.search(cr, uid, [('id','child_of',ids)]),
            ['parent_id', 'is_company', 'name'], 10)
    }

    # indirection to avoid passing a copy of the overridable method when declaring the function field
    _display_name = lambda self, *args, **kwargs: self._display_name_compute(*args, **kwargs)

    _columns = {
        # extra field to allow ORDER BY to match visible names
        'display_name': fields.function(_display_name, type='char', string='Name', store=_display_name_store_triggers),
    }
class account_invoice(osv.Model):
    """Extends account.invoice with a stored link to the commercial entity."""
    _inherit = 'account.invoice'

    _columns = {
        # Stored related field: denormalizes the partner's commercial entity
        # so reports can group/filter invoices by it directly.
        'commercial_partner_id': fields.related('partner_id', 'commercial_partner_id', string='Commercial Entity', type='many2one',
                                                relation='res.partner', store=True, readonly=True,
                                                help="The commercial entity that will be used on Journal Entries for this invoice")
    }
| {
"content_hash": "36545ce02171ca30b742d480f04712f6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 131,
"avg_line_length": 44.5,
"alnum_prop": 0.6109550561797753,
"repo_name": "chjw8016/GreenOdoo7-haibao",
"id": "6247ca268065980ceebca2a76c7d4ef9bee62625",
"size": "2396",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "openerp/addons/account_report_company/account_report_company.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "90846"
},
{
"name": "CSS",
"bytes": "384369"
},
{
"name": "JavaScript",
"bytes": "1730589"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9394626"
},
{
"name": "Shell",
"bytes": "5172"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
} |
from django.conf.urls import url, include
from django.contrib.auth.decorators import login_required
from securedpi_locks import views
# Routes for managing individual locks.  Every view is wrapped in
# ``login_required`` so only authenticated users can reach it; the two
# manual routes share one view function and differ only in the
# ``action`` kwarg passed through to it.
lock_patterns = [
    url(r'^(?P<pk>\d+)/edit/$',
        login_required(views.EditLockView.as_view()),
        name='edit_lock'),
    url(r'^(?P<pk>\d+)/delete/$',
        login_required(views.DeleteLockView.as_view()),
        name='delete_lock'),
    url(r'^manual-unlock/(?P<pk>\d+)/$',
        login_required(views.manual_action),
        kwargs={'action': 'unlock'},
        name='manual_unlock'),
    url(r'^manual-lock/(?P<pk>\d+)/$',
        login_required(views.manual_action),
        kwargs={'action': 'lock'},
        name='manual_lock'),
]

# Login/logout endpoints for the browsable DRF API.
api_patterns = [
    url(r'^api-auth/',
        include('rest_framework.urls', namespace='rest_framework')),
]

urlpatterns = lock_patterns + api_patterns
| {
"content_hash": "1ed4c2815ad4bcb87fc507c25414e6db",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 60,
"avg_line_length": 30.51851851851852,
"alnum_prop": 0.5825242718446602,
"repo_name": "Secured-Pi/Secured-Pi",
"id": "b8f6e9e2c8ee286887fba76d49775a047c45b10b",
"size": "824",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging",
"path": "securedpi_locks/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "95"
},
{
"name": "HTML",
"bytes": "25485"
},
{
"name": "Python",
"bytes": "79801"
}
],
"symlink_target": ""
} |
"""
.. dialect:: mysql
:name: MySQL
Supported Versions and Features
-------------------------------
SQLAlchemy supports MySQL starting with version 4.1 through modern releases.
However, no heroic measures are taken to work around major missing
SQL features - if your server version does not support sub-selects, for
example, they won't work in SQLAlchemy either.
See the official MySQL documentation for detailed information about features
supported in any given server release.
.. _mysql_connection_timeouts:
Connection Timeouts
-------------------
MySQL features an automatic connection close behavior, for connections that
have been idle for eight hours or more. To circumvent having this issue, use
the ``pool_recycle`` option which controls the maximum age of any connection::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
.. _mysql_storage_engines:
CREATE TABLE arguments including Storage Engines
------------------------------------------------
MySQL's CREATE TABLE syntax includes a wide array of special options,
including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``,
``INSERT_METHOD``, and many more.
To accommodate the rendering of these arguments, specify the form
``mysql_argument_name="value"``. For example, to specify a table with
``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8``, and ``KEY_BLOCK_SIZE``
of ``1024``::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8',
mysql_key_block_size="1024"
)
The MySQL dialect will normally transfer any keyword specified as
``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the
``CREATE TABLE`` statement. A handful of these names will render with a space
instead of an underscore; to support this, the MySQL dialect has awareness of
these particular names, which include ``DATA DIRECTORY``
(e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g.
``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g.
``mysql_index_directory``).
The most common argument is ``mysql_engine``, which refers to the storage
engine for the table. Historically, MySQL server installations would default
to ``MyISAM`` for this value, although newer versions may be defaulting
to ``InnoDB``. The ``InnoDB`` engine is typically preferred for its support
of transactions and foreign keys.
A :class:`.Table` that is created in a MySQL database with a storage engine
of ``MyISAM`` will be essentially non-transactional, meaning any
INSERT/UPDATE/DELETE statement referring to this table will be invoked as
autocommit. It also will have no support for foreign key constraints; while
the ``CREATE TABLE`` statement accepts foreign key options, when using the
``MyISAM`` storage engine these arguments are discarded. Reflecting such a
table will also produce no foreign key constraint information.
For fully atomic transactions as well as support for foreign key
constraints, all participating ``CREATE TABLE`` statements must specify a
transactional engine, which in the vast majority of cases is ``InnoDB``.
.. seealso::
`The InnoDB Storage Engine
<http://dev.mysql.com/doc/refman/5.0/en/innodb-storage-engine.html>`_ -
on the MySQL website.
Case Sensitivity and Table Reflection
-------------------------------------
MySQL has inconsistent support for case-sensitive identifier
names, basing support on specific details of the underlying
operating system. However, it has been observed that no matter
what case sensitivity behavior is present, the names of tables in
foreign key declarations are *always* received from the database
as all-lower case, making it impossible to accurately reflect a
schema where inter-related tables use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as
all lower case both within SQLAlchemy as well as on the MySQL
database itself, especially if database reflection features are
to be used.
.. _mysql_isolation_level:
Transaction Isolation Level
---------------------------
:func:`.create_engine` accepts an :paramref:`.create_engine.isolation_level`
parameter which results in the command ``SET SESSION
TRANSACTION ISOLATION LEVEL <level>`` being invoked for
every new connection. Valid values for this parameter are
``READ COMMITTED``, ``READ UNCOMMITTED``,
``REPEATABLE READ``, and ``SERIALIZABLE``::
engine = create_engine(
"mysql://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
.. versionadded:: 0.7.6
AUTO_INCREMENT Behavior
-----------------------
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
the first :class:`.Integer` primary key column which is not marked as a
foreign key::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
  mytable_id INTEGER NOT NULL AUTO_INCREMENT,
  PRIMARY KEY (mytable_id)
)
You can disable this behavior by passing ``False`` to the
:paramref:`~.Column.autoincrement` argument of :class:`.Column`. This flag
can also be used to enable auto-increment on a secondary column in a
multi-column key for some storage engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
.. _mysql_unicode:
Unicode
-------
Charset Selection
~~~~~~~~~~~~~~~~~
Most MySQL DBAPIs offer the option to set the client character set for
a connection. This is typically delivered using the ``charset`` parameter
in the URL, such as::
e = create_engine("mysql+pymysql://scott:tiger@localhost/\
test?charset=utf8")
This charset is the **client character set** for the connection. Some
MySQL DBAPIs will default this to a value such as ``latin1``, and some
will make use of the ``default-character-set`` setting in the ``my.cnf``
file as well. Documentation for the DBAPI in use should be consulted
for specific behavior.
The encoding used for Unicode has traditionally been ``'utf8'``. However,
for MySQL versions 5.5.3 on forward, a new MySQL-specific encoding
``'utf8mb4'`` has been introduced. The rationale for this new encoding
is due to the fact that MySQL's utf-8 encoding only supports
codepoints up to three bytes instead of four. Therefore,
when communicating with a MySQL database
that includes codepoints more than three bytes in size,
this new charset is preferred, if supported by both the database as well
as the client DBAPI, as in::
e = create_engine("mysql+pymysql://scott:tiger@localhost/\
test?charset=utf8mb4")
At the moment, up-to-date versions of MySQLdb and PyMySQL support the
``utf8mb4`` charset. Other DBAPIs such as MySQL-Connector and OurSQL
may **not** support it as of yet.
In order to use ``utf8mb4`` encoding, changes to
the MySQL schema and/or server configuration may be required.
.. seealso::
`The utf8mb4 Character Set \
<http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html>`_ - \
in the MySQL documentation
Unicode Encoding / Decoding
~~~~~~~~~~~~~~~~~~~~~~~~~~~
All modern MySQL DBAPIs offer the service of handling the encoding and
decoding of unicode data between the Python application space and the database.
As this was not always the case, SQLAlchemy also includes a comprehensive system
of performing the encode/decode task as well. As only one of these systems
should be in use at a time, SQLAlchemy has long included functionality
to automatically detect upon first connection whether or not the DBAPI is
automatically handling unicode.
Whether or not the MySQL DBAPI will handle encoding can usually be configured
using a DBAPI flag ``use_unicode``, which is known to be supported at least
by MySQLdb, PyMySQL, and MySQL-Connector. Setting this value to ``0``
in the "connect args" or query string will have the effect of disabling the
DBAPI's handling of unicode, such that it instead will return data of the
``str`` type or ``bytes`` type, with data in the configured charset::
# connect while disabling the DBAPI's unicode encoding/decoding
e = create_engine("mysql+mysqldb://scott:tiger@localhost/test?charset=utf8&use_unicode=0")
Current recommendations for modern DBAPIs are as follows:
* It is generally always safe to leave the ``use_unicode`` flag set at
its default; that is, don't use it at all.
* Under Python 3, the ``use_unicode=0`` flag should **never be used**.
SQLAlchemy under Python 3 generally assumes the DBAPI receives and returns
string values as Python 3 strings, which are inherently unicode objects.
* Under Python 2 with MySQLdb, the ``use_unicode=0`` flag will **offer
superior performance**, as MySQLdb's unicode converters under Python 2 only
have been observed to have unusually slow performance compared to SQLAlchemy's
fast C-based encoders/decoders.
In short: don't specify ``use_unicode`` *at all*, with the possible
exception of ``use_unicode=0`` on MySQLdb with Python 2 **only** for a
potential performance gain.
Ansi Quoting Style
------------------
MySQL features two varieties of identifier "quoting style", one using
backticks and the other using quotes, e.g. ```some_identifier``` vs.
``"some_identifier"``. All MySQL dialects detect which version
is in use by checking the value of ``sql_mode`` when a connection is first
established with a particular :class:`.Engine`. This quoting style comes
into play when rendering table and column names as well as when reflecting
existing database structures. The detection is entirely automatic and
no special configuration is needed to use either quoting style.
.. versionchanged:: 0.6 detection of ANSI quoting style is entirely automatic,
there's no longer any end-user ``create_engine()`` options in this regard.
MySQL SQL Extensions
--------------------
Many of the MySQL SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid MySQL statement can be executed as a string as well.
Some limited direct support for MySQL extensions to SQL is currently
available.
* SELECT pragma::
select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., mysql_limit=10)
rowcount Support
----------------
SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
usual definition of "number of rows matched by an UPDATE or DELETE" statement.
This is in contradiction to the default setting on most MySQL DBAPI drivers,
which is "number of rows actually modified/deleted". For this reason, the
SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS``
flag, or whatever is equivalent for the target dialect, upon connection.
This setting is currently hardcoded.
.. seealso::
:attr:`.ResultProxy.rowcount`
CAST Support
------------
MySQL documents the CAST operator as available in version 4.0.2. When using
the SQLAlchemy :func:`.cast` function, SQLAlchemy
will not render the CAST token on MySQL before this version, based on server
version detection, instead rendering the internal expression directly.
CAST may still not be desirable on an early MySQL version post-4.0.2, as it
didn't add all datatype support until 4.1.1. If your application falls into
this narrow area, the behavior of CAST can be controlled using the
:ref:`sqlalchemy.ext.compiler_toplevel` system, as per the recipe below::
from sqlalchemy.sql.expression import Cast
from sqlalchemy.ext.compiler import compiles
@compiles(Cast, 'mysql')
def _check_mysql_version(element, compiler, **kw):
if compiler.dialect.server_version_info < (4, 1, 0):
return compiler.process(element.clause, **kw)
else:
return compiler.visit_cast(element, **kw)
The above function, which only needs to be declared once
within an application, overrides the compilation of the
:func:`.cast` construct to check for version 4.1.0 before
fully rendering CAST; else the internal element of the
construct is rendered directly.
.. _mysql_indexes:
MySQL Specific Index Options
----------------------------
MySQL-specific extensions to the :class:`.Index` construct are available.
Index Length
~~~~~~~~~~~~~
MySQL provides an option to create index entries with a certain length, where
"length" refers to the number of characters or bytes in each value which will
become part of the index. SQLAlchemy provides this feature via the
``mysql_length`` parameter::
Index('my_index', my_table.c.data, mysql_length=10)
Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
'b': 9})
Prefix lengths are given in characters for nonbinary string types and in bytes
for binary string types. The value passed to the keyword argument *must* be
either an integer (and, thus, specify the same prefix length value for all
columns of the index) or a dict in which keys are column names and values are
prefix length values for corresponding columns. MySQL only allows a length for
a column of an index if it is for a CHAR, VARCHAR, TEXT, BINARY, VARBINARY and
BLOB.
.. versionadded:: 0.8.2 ``mysql_length`` may now be specified as a dictionary
for use with composite indexes.
Index Types
~~~~~~~~~~~~~
Some MySQL storage engines permit you to specify an index type when creating
an index or primary key constraint. SQLAlchemy provides this feature via the
``mysql_using`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_using='hash')
As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
PrimaryKeyConstraint("data", mysql_using='hash')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
type for your MySQL storage engine.
More information can be found at:
http://dev.mysql.com/doc/refman/5.0/en/create-index.html
http://dev.mysql.com/doc/refman/5.0/en/create-table.html
.. _mysql_foreign_keys:
MySQL Foreign Keys
------------------
MySQL's behavior regarding foreign keys has some important caveats.
Foreign Key Arguments to Avoid
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL does not support the foreign key arguments "DEFERRABLE", "INITIALLY",
or "MATCH". Using the ``deferrable`` or ``initially`` keyword argument with
:class:`.ForeignKeyConstraint` or :class:`.ForeignKey` will have the effect of
these keywords being rendered in a DDL expression, which will then raise an
error on MySQL. In order to use these keywords on a foreign key while having
them ignored on a MySQL backend, use a custom compile rule::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import ForeignKeyConstraint
@compiles(ForeignKeyConstraint, "mysql")
def process(element, compiler, **kw):
element.deferrable = element.initially = None
return compiler.visit_foreign_key_constraint(element, **kw)
.. versionchanged:: 0.9.0 - the MySQL backend no longer silently ignores
the ``deferrable`` or ``initially`` keyword arguments of
:class:`.ForeignKeyConstraint` and :class:`.ForeignKey`.
The "MATCH" keyword is in fact more insidious, and is explicitly disallowed
by SQLAlchemy in conjunction with the MySQL backend. This argument is
silently ignored by MySQL, but in addition has the effect of ON UPDATE and ON
DELETE options also being ignored by the backend. Therefore MATCH should
never be used with the MySQL backend; as is the case with DEFERRABLE and
INITIALLY, custom compilation rules can be used to correct a MySQL
ForeignKeyConstraint at DDL definition time.
.. versionadded:: 0.9.0 - the MySQL backend will raise a
:class:`.CompileError` when the ``match`` keyword is used with
:class:`.ForeignKeyConstraint` or :class:`.ForeignKey`.
Reflection of Foreign Key Constraints
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Not all MySQL storage engines support foreign keys. When using the
very common ``MyISAM`` MySQL storage engine, the information loaded by table
reflection will not include foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload=True
)
.. seealso::
:ref:`mysql_storage_engines`
.. _mysql_unique_constraints:
MySQL Unique Constraints and Reflection
---------------------------------------
SQLAlchemy supports both the :class:`.Index` construct with the
flag ``unique=True``, indicating a UNIQUE index, as well as the
:class:`.UniqueConstraint` construct, representing a UNIQUE constraint.
Both objects/syntaxes are supported by MySQL when emitting DDL to create
these constraints. However, MySQL does not have a unique constraint
construct that is separate from a unique index; that is, the "UNIQUE"
constraint on MySQL is equivalent to creating a "UNIQUE INDEX".
When reflecting these constructs, the :meth:`.Inspector.get_indexes`
and the :meth:`.Inspector.get_unique_constraints` methods will **both**
return an entry for a UNIQUE index in MySQL. However, when performing
full table reflection using ``Table(..., autoload=True)``,
the :class:`.UniqueConstraint` construct is
**not** part of the fully reflected :class:`.Table` construct under any
circumstances; this construct is always represented by a :class:`.Index`
with the ``unique=True`` setting present in the :attr:`.Table.indexes`
collection.
.. _mysql_timestamp_null:
TIMESTAMP Columns and NULL
--------------------------
MySQL historically enforces that a column which specifies the
TIMESTAMP datatype implicitly includes a default value of
CURRENT_TIMESTAMP, even though this is not stated, and additionally
sets the column as NOT NULL, the opposite behavior vs. that of all
other datatypes::
mysql> CREATE TABLE ts_test (
-> a INTEGER,
-> b INTEGER NOT NULL,
-> c TIMESTAMP,
-> d TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-> e TIMESTAMP NULL);
Query OK, 0 rows affected (0.03 sec)
mysql> SHOW CREATE TABLE ts_test;
+---------+-----------------------------------------------------
| Table | Create Table
+---------+-----------------------------------------------------
| ts_test | CREATE TABLE `ts_test` (
`a` int(11) DEFAULT NULL,
`b` int(11) NOT NULL,
`c` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`e` timestamp NULL DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
Above, we see that an INTEGER column defaults to NULL, unless it is specified
with NOT NULL. But when the column is of type TIMESTAMP, an implicit
default of CURRENT_TIMESTAMP is generated which also coerces the column
to be a NOT NULL, even though we did not specify it as such.
This behavior of MySQL can be changed on the MySQL side using the
`explicit_defaults_for_timestamp
<http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html
#sysvar_explicit_defaults_for_timestamp>`_ configuration flag introduced in
MySQL 5.6. With this server setting enabled, TIMESTAMP columns behave like
any other datatype on the MySQL side with regards to defaults and nullability.
However, to accommodate the vast majority of MySQL databases that do not
specify this new flag, SQLAlchemy emits the "NULL" specifier explicitly with
any TIMESTAMP column that does not specify ``nullable=False``. In order
to accommodate newer databases that specify ``explicit_defaults_for_timestamp``,
SQLAlchemy also emits NOT NULL for TIMESTAMP columns that do specify
``nullable=False``. The following example illustrates::
from sqlalchemy import MetaData, Integer, Table, Column, text
from sqlalchemy.dialects.mysql import TIMESTAMP
m = MetaData()
t = Table('ts_test', m,
Column('a', Integer),
Column('b', Integer, nullable=False),
Column('c', TIMESTAMP),
Column('d', TIMESTAMP, nullable=False)
)
from sqlalchemy import create_engine
e = create_engine("mysql://scott:tiger@localhost/test", echo=True)
m.create_all(e)
output::
CREATE TABLE ts_test (
a INTEGER,
b INTEGER NOT NULL,
c TIMESTAMP NULL,
d TIMESTAMP NOT NULL
)
.. versionchanged:: 1.0.0 - SQLAlchemy now renders NULL or NOT NULL in all
cases for TIMESTAMP columns, to accommodate
``explicit_defaults_for_timestamp``. Prior to this version, it will
not render "NOT NULL" for a TIMESTAMP column that is ``nullable=False``.
"""
import datetime
import re
import sys
from ... import schema as sa_schema
from ... import exc, log, sql, util
from ...sql import compiler
from array import array as _array
from ...engine import reflection
from ...engine import default
from ... import types as sqltypes
from ...util import topological
from ...types import DATE, BOOLEAN, \
BLOB, BINARY, VARBINARY
# MySQL reserved words, grouped by the server version that introduced
# them.  Identifiers matching any of these must be quoted when rendered.
# Note: the 5.1 additions ('accessible', 'linear',
# 'master_ssl_verify_server_cert', 'range', 'read_only', 'read_write')
# already appear in the 5.0 group above and are not repeated here.
RESERVED_WORDS = set(
    ['accessible', 'add', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
     'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both',
     'by', 'call', 'cascade', 'case', 'change', 'char', 'character', 'check',
     'collate', 'column', 'condition', 'constraint', 'continue', 'convert',
     'create', 'cross', 'current_date', 'current_time', 'current_timestamp',
     'current_user', 'cursor', 'database', 'databases', 'day_hour',
     'day_microsecond', 'day_minute', 'day_second', 'dec', 'decimal',
     'declare', 'default', 'delayed', 'delete', 'desc', 'describe',
     'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop',
     'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists',
     'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8',
     'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group',
     'having', 'high_priority', 'hour_microsecond', 'hour_minute',
     'hour_second', 'if', 'ignore', 'in', 'index', 'infile', 'inner', 'inout',
     'insensitive', 'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8',
     'integer', 'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys',
     'kill', 'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines',
     'load', 'localtime', 'localtimestamp', 'lock', 'long', 'longblob',
     'longtext', 'loop', 'low_priority', 'master_ssl_verify_server_cert',
     'match', 'mediumblob', 'mediumint', 'mediumtext', 'middleint',
     'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural',
     'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize',
     'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile',
     'precision', 'primary', 'procedure', 'purge', 'range', 'read', 'reads',
     'read_only', 'read_write', 'real', 'references', 'regexp', 'release',
     'rename', 'repeat', 'replace', 'require', 'restrict', 'return',
     'revoke', 'right', 'rlike', 'schema', 'schemas', 'second_microsecond',
     'select', 'sensitive', 'separator', 'set', 'show', 'smallint', 'spatial',
     'specific', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning',
     'sql_big_result', 'sql_calc_found_rows', 'sql_small_result', 'ssl',
     'starting', 'straight_join', 'table', 'terminated', 'then', 'tinyblob',
     'tinyint', 'tinytext', 'to', 'trailing', 'trigger', 'true', 'undo',
     'union', 'unique', 'unlock', 'unsigned', 'update', 'usage', 'use',
     'using', 'utc_date', 'utc_time', 'utc_timestamp', 'values', 'varbinary',
     'varchar', 'varcharacter', 'varying', 'when', 'where', 'while', 'with',
     'write', 'x509', 'xor', 'year_month', 'zerofill',  # 5.0

     'columns', 'fields', 'privileges', 'soname', 'tables',  # 4.1

     'general', 'ignore_server_ids', 'master_heartbeat_period', 'maxvalue',
     'resignal', 'signal', 'slow',  # 5.5

     'get', 'io_after_gtids', 'io_before_gtids', 'master_bind', 'one_shot',
     'partition', 'sql_after_gtids', 'sql_before_gtids',  # 5.6
     ])
# Matches statement text beginning with a DML/DDL keyword.
# NOTE(review): presumably used to decide when the dialect should issue
# an implicit COMMIT in autocommit emulation — confirm at the use site.
AUTOCOMMIT_RE = re.compile(
    r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)',
    re.I | re.UNICODE)
# Matches "SET [GLOBAL|SESSION] <variable> ..." statements.
SET_RE = re.compile(
    r'\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w',
    re.I | re.UNICODE)
class _NumericType(object):
"""Base for MySQL numeric types.
This is the base both for NUMERIC as well as INTEGER, hence
it's a mixin.
"""
def __init__(self, unsigned=False, zerofill=False, **kw):
self.unsigned = unsigned
self.zerofill = zerofill
super(_NumericType, self).__init__(**kw)
def __repr__(self):
return util.generic_repr(self,
to_inspect=[_NumericType, sqltypes.Numeric])
class _FloatType(_NumericType, sqltypes.Float):
    """Shared base for MySQL floating-point types (REAL, DOUBLE, FLOAT)."""

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        # REAL and DOUBLE accept either no arguments or both precision
        # and scale; supplying exactly one of the two is rejected here.
        if isinstance(self, (REAL, DOUBLE)):
            if (precision is None) != (scale is None):
                raise exc.ArgumentError(
                    "You must specify both precision and scale or omit "
                    "both altogether.")
        super(_FloatType, self).__init__(
            precision=precision, asdecimal=asdecimal, **kw)
        self.scale = scale

    def __repr__(self):
        return util.generic_repr(
            self,
            to_inspect=[_FloatType, _NumericType, sqltypes.Float])
class _IntegerType(_NumericType, sqltypes.Integer):
    """Shared base for MySQL integer types; adds the display-width option."""

    def __init__(self, display_width=None, **kw):
        # ``display_width`` maps to MySQL's INT(n) rendering; it affects
        # display padding only, not the range of storable values.
        self.display_width = display_width
        super(_IntegerType, self).__init__(**kw)

    def __repr__(self):
        return util.generic_repr(
            self,
            to_inspect=[_IntegerType, _NumericType, sqltypes.Integer])
class _StringType(sqltypes.String):
    """Base for MySQL string types.

    Carries the MySQL-specific column modifiers (``CHARACTER SET``,
    ``COLLATE``, ``ASCII``, ``BINARY``, ``UNICODE``, ``NATIONAL``).
    """

    def __init__(self, charset=None, collation=None,
                 ascii=False, binary=False, unicode=False,
                 national=False, **kw):
        self.charset = charset

        # Accept either collate= or collation=; an explicit 'collation'
        # in kw always wins, and any 'collate' key is consumed.
        collate = kw.pop('collate', collation)
        kw.setdefault('collation', collate)

        self.national = national
        self.binary = binary
        self.unicode = unicode
        self.ascii = ascii
        super(_StringType, self).__init__(**kw)

    def __repr__(self):
        return util.generic_repr(
            self, to_inspect=[_StringType, sqltypes.String])
class _MatchType(sqltypes.Float, sqltypes.MatchType):
    """Result type combining Float and MatchType behavior.

    Initializes both bases explicitly rather than cooperatively, so
    the dual inheritance works without a shared ``super()`` chain.
    """
    def __init__(self, **kw):
        # TODO: float arguments?
        # ``kw`` is accepted for interface compatibility but ignored.
        sqltypes.Float.__init__(self)
        sqltypes.MatchType.__init__(self)
class NUMERIC(_NumericType, sqltypes.NUMERIC):
    """MySQL NUMERIC type."""

    __visit_name__ = 'NUMERIC'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a NUMERIC.

        :param precision: Total digits in this number.  If both scale
          and precision are None, values are stored to the limits
          allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as
          strings left-padded with zeros.  Note that this does not
          affect the values returned by the underlying database API,
          which continue to be numeric.

        """
        super(NUMERIC, self).__init__(
            precision=precision, scale=scale, asdecimal=asdecimal, **kw)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
    """MySQL DECIMAL type."""

    __visit_name__ = 'DECIMAL'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DECIMAL.

        :param precision: Total digits in this number.  If both scale
          and precision are None, values are stored to the limits
          allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as
          strings left-padded with zeros.  Note that this does not
          affect the values returned by the underlying database API,
          which continue to be numeric.

        """
        super(DECIMAL, self).__init__(
            precision=precision, scale=scale, asdecimal=asdecimal, **kw)
class DOUBLE(_FloatType):
    """MySQL DOUBLE type."""

    __visit_name__ = 'DOUBLE'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DOUBLE.

        .. note::

            The :class:`.DOUBLE` type by default converts from float to
            Decimal, using a truncation that defaults to 10 digits.
            Specify either ``scale=n`` or ``decimal_return_scale=n`` in
            order to change this scale, or ``asdecimal=False`` to
            return values directly as Python floating points.

        :param precision: Total digits in this number.  If both scale
          and precision are None, values are stored to the limits
          allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as
          strings left-padded with zeros.  Note that this does not
          affect the values returned by the underlying database API,
          which continue to be numeric.

        """
        super(DOUBLE, self).__init__(
            precision=precision, scale=scale, asdecimal=asdecimal, **kw)
class REAL(_FloatType, sqltypes.REAL):
    """MySQL REAL type."""

    __visit_name__ = 'REAL'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a REAL.

        .. note::

            The :class:`.REAL` type by default converts from float to
            Decimal, using a truncation that defaults to 10 digits.
            Specify either ``scale=n`` or ``decimal_return_scale=n`` in
            order to change this scale, or ``asdecimal=False`` to
            return values directly as Python floating points.

        :param precision: Total digits in this number.  If both scale
          and precision are None, values are stored to the limits
          allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as
          strings left-padded with zeros.  Note that this does not
          affect the values returned by the underlying database API,
          which continue to be numeric.

        """
        super(REAL, self).__init__(
            precision=precision, scale=scale, asdecimal=asdecimal, **kw)
class FLOAT(_FloatType, sqltypes.FLOAT):
    """MySQL FLOAT type."""

    __visit_name__ = 'FLOAT'

    def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
        """Construct a FLOAT.

        :param precision: Total digits in this number.  If both scale
          and precision are None, values are stored to the limits
          allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as
          strings left-padded with zeros.  Note that this does not
          affect the values returned by the underlying database API,
          which continue to be numeric.

        """
        super(FLOAT, self).__init__(
            precision=precision, scale=scale, asdecimal=asdecimal, **kw)

    def bind_processor(self, dialect):
        # No bind-side conversion needed; values pass through as-is.
        return None
class INTEGER(_IntegerType, sqltypes.INTEGER):
    """MySQL INTEGER type."""

    __visit_name__ = 'INTEGER'

    def __init__(self, display_width=None, **kw):
        """Construct an INTEGER.

        :param display_width: Optional, maximum display width for this
          number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as
          strings left-padded with zeros.  Note that this does not
          affect the values returned by the underlying database API,
          which continue to be numeric.

        """
        super(INTEGER, self).__init__(display_width=display_width, **kw)
class BIGINT(_IntegerType, sqltypes.BIGINT):
    """MySQL BIGINTEGER type."""

    __visit_name__ = 'BIGINT'

    def __init__(self, display_width=None, **kw):
        """Construct a BIGINTEGER.

        :param display_width: Optional, maximum display width for this
          number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as
          strings left-padded with zeros.  Note that this does not
          affect the values returned by the underlying database API,
          which continue to be numeric.

        """
        super(BIGINT, self).__init__(display_width=display_width, **kw)
class MEDIUMINT(_IntegerType):
    """MySQL MEDIUMINTEGER type."""

    __visit_name__ = 'MEDIUMINT'

    def __init__(self, display_width=None, **kw):
        """Construct a MEDIUMINTEGER.

        :param display_width: Optional, maximum display width for this
          number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as
          strings left-padded with zeros.  Note that this does not
          affect the values returned by the underlying database API,
          which continue to be numeric.

        """
        super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
class TINYINT(_IntegerType):
    """MySQL TINYINT type."""

    __visit_name__ = 'TINYINT'

    def __init__(self, display_width=None, **kw):
        """Construct a TINYINT.

        :param display_width: Optional, maximum display width for this
         number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional.  If true, values will be stored as
         strings left-padded with zeros.  Note that this does not affect
         the values returned by the underlying database API, which
         continue to be numeric.
        """
        super(TINYINT, self).__init__(display_width=display_width, **kw)
class SMALLINT(_IntegerType, sqltypes.SMALLINT):
    """MySQL SMALLINTEGER type."""

    __visit_name__ = 'SMALLINT'

    def __init__(self, display_width=None, **kw):
        """Construct a SMALLINTEGER.

        :param display_width: Optional, maximum display width for this
         number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional.  If true, values will be stored as
         strings left-padded with zeros.  Note that this does not affect
         the values returned by the underlying database API, which
         continue to be numeric.
        """
        super(SMALLINT, self).__init__(display_width=display_width, **kw)
class BIT(sqltypes.TypeEngine):
    """MySQL BIT type.

    Available for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or
    greater for MyISAM, MEMORY, InnoDB and BDB.  For older server
    versions, use a MSTinyInteger() type instead.
    """

    __visit_name__ = 'BIT'

    def __init__(self, length=None):
        """Construct a BIT.

        :param length: Optional, number of bits.
        """
        self.length = length

    def result_processor(self, dialect, coltype):
        """Convert a MySQL's 64 bit, variable length binary string to a
        long.

        TODO: this is MySQL-db, pyodbc specific.  OurSQL and
        mysqlconnector already do this, so this logic should be moved to
        those dialects.
        """
        def process(value):
            if value is None:
                return value
            result = 0
            for byte in value:
                if not isinstance(byte, int):
                    # Python 2 iterates a binary string as 1-char
                    # strings; convert each to its ordinal.
                    byte = ord(byte)
                result = (result << 8) | byte
            return result
        return process
class TIME(sqltypes.TIME):
    """MySQL TIME type. """

    __visit_name__ = 'TIME'

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL TIME type.

        :param timezone: not used by the MySQL dialect.

        :param fsp: fractional seconds precision value.  MySQL 5.6
         supports storage of fractional seconds; this parameter is used
         when emitting DDL for the TIME type.

         .. note::

            DBAPI driver support for fractional seconds may be limited;
            current support includes MySQL Connector/Python.

        .. versionadded:: 0.8 The MySQL-specific TIME
           type as well as fractional seconds support.
        """
        super(TIME, self).__init__(timezone=timezone)
        self.fsp = fsp

    def result_processor(self, dialect, coltype):
        # DBAPIs return a timedelta for TIME columns; convert it into a
        # datetime.time value.
        time = datetime.time

        def process(value):
            if value is None:
                return None
            hours, remainder = divmod(value.seconds, 3600)
            minutes, seconds = divmod(remainder, 60)
            return time(hours, minutes, seconds,
                        microsecond=value.microseconds)
        return process
class TIMESTAMP(sqltypes.TIMESTAMP):
    """MySQL TIMESTAMP type.

    """

    __visit_name__ = 'TIMESTAMP'

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL TIMESTAMP type.

        :param timezone: not used by the MySQL dialect.

        :param fsp: fractional seconds precision value.  MySQL 5.6.4
         supports storage of fractional seconds; this parameter is used
         when emitting DDL for the TIMESTAMP type.

         .. note::

            DBAPI driver support for fractional seconds may be limited;
            current support includes MySQL Connector/Python.

        .. versionadded:: 0.8.5 Added MySQL-specific
           :class:`.mysql.TIMESTAMP` with fractional seconds support.
        """
        super(TIMESTAMP, self).__init__(timezone=timezone)
        self.fsp = fsp
class DATETIME(sqltypes.DATETIME):
    """MySQL DATETIME type.

    """

    __visit_name__ = 'DATETIME'

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL DATETIME type.

        :param timezone: not used by the MySQL dialect.

        :param fsp: fractional seconds precision value.  MySQL 5.6.4
         supports storage of fractional seconds; this parameter is used
         when emitting DDL for the DATETIME type.

         .. note::

            DBAPI driver support for fractional seconds may be limited;
            current support includes MySQL Connector/Python.

        .. versionadded:: 0.8.5 Added MySQL-specific
           :class:`.mysql.DATETIME` with fractional seconds support.
        """
        super(DATETIME, self).__init__(timezone=timezone)
        self.fsp = fsp
class YEAR(sqltypes.TypeEngine):
    """MySQL YEAR type, for single byte storage of years 1901-2155."""

    __visit_name__ = 'YEAR'

    def __init__(self, display_width=None):
        # Optional display width, rendered as YEAR(n) in DDL when given.
        self.display_width = display_width
class TEXT(_StringType, sqltypes.TEXT):
    """MySQL TEXT type, for text up to 2^16 characters."""

    __visit_name__ = 'TEXT'

    def __init__(self, length=None, **kw):
        """Construct a TEXT.

        :param length: Optional; when provided, the server may optimize
         storage by substituting the smallest TEXT variant sufficient to
         store ``length`` characters.

        :param charset: Optional column-level character set for this
         string value; takes precedence over the 'ascii' / 'unicode'
         short-hands.

        :param collation: Optional column-level collation for this string
         value; takes precedence over the 'binary' short-hand.

        :param ascii: Defaults to False; short-hand for the ``latin1``
         character set, generates ASCII in schema.

        :param unicode: Defaults to False; short-hand for the ``ucs2``
         character set, generates UNICODE in schema.

        :param national: Optional.  If true, use the server's configured
         national character set.

        :param binary: Defaults to False; short-hand to pick the binary
         collation that matches the column's character set, generating
         BINARY in schema.  Affects only the collation of character
         data, not the type of data stored.
        """
        super(TEXT, self).__init__(length=length, **kw)
class TINYTEXT(_StringType):
    """MySQL TINYTEXT type, for text up to 2^8 characters."""

    __visit_name__ = 'TINYTEXT'

    def __init__(self, **kwargs):
        """Construct a TINYTEXT.

        :param charset: Optional column-level character set for this
         string value; takes precedence over the 'ascii' / 'unicode'
         short-hands.

        :param collation: Optional column-level collation for this string
         value; takes precedence over the 'binary' short-hand.

        :param ascii: Defaults to False; short-hand for the ``latin1``
         character set, generates ASCII in schema.

        :param unicode: Defaults to False; short-hand for the ``ucs2``
         character set, generates UNICODE in schema.

        :param national: Optional.  If true, use the server's configured
         national character set.

        :param binary: Defaults to False; short-hand to pick the binary
         collation that matches the column's character set, generating
         BINARY in schema.  Affects only the collation of character
         data, not the type of data stored.
        """
        super(TINYTEXT, self).__init__(**kwargs)
class MEDIUMTEXT(_StringType):
    """MySQL MEDIUMTEXT type, for text up to 2^24 characters."""

    __visit_name__ = 'MEDIUMTEXT'

    def __init__(self, **kwargs):
        """Construct a MEDIUMTEXT.

        :param charset: Optional column-level character set for this
         string value; takes precedence over the 'ascii' / 'unicode'
         short-hands.

        :param collation: Optional column-level collation for this string
         value; takes precedence over the 'binary' short-hand.

        :param ascii: Defaults to False; short-hand for the ``latin1``
         character set, generates ASCII in schema.

        :param unicode: Defaults to False; short-hand for the ``ucs2``
         character set, generates UNICODE in schema.

        :param national: Optional.  If true, use the server's configured
         national character set.

        :param binary: Defaults to False; short-hand to pick the binary
         collation that matches the column's character set, generating
         BINARY in schema.  Affects only the collation of character
         data, not the type of data stored.
        """
        super(MEDIUMTEXT, self).__init__(**kwargs)
class LONGTEXT(_StringType):
    """MySQL LONGTEXT type, for text up to 2^32 characters."""

    __visit_name__ = 'LONGTEXT'

    def __init__(self, **kwargs):
        """Construct a LONGTEXT.

        :param charset: Optional column-level character set for this
         string value; takes precedence over the 'ascii' / 'unicode'
         short-hands.

        :param collation: Optional column-level collation for this string
         value; takes precedence over the 'binary' short-hand.

        :param ascii: Defaults to False; short-hand for the ``latin1``
         character set, generates ASCII in schema.

        :param unicode: Defaults to False; short-hand for the ``ucs2``
         character set, generates UNICODE in schema.

        :param national: Optional.  If true, use the server's configured
         national character set.

        :param binary: Defaults to False; short-hand to pick the binary
         collation that matches the column's character set, generating
         BINARY in schema.  Affects only the collation of character
         data, not the type of data stored.
        """
        super(LONGTEXT, self).__init__(**kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
    """MySQL VARCHAR type, for variable-length character data."""

    __visit_name__ = 'VARCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct a VARCHAR.

        :param charset: Optional column-level character set for this
         string value; takes precedence over the 'ascii' / 'unicode'
         short-hands.

        :param collation: Optional column-level collation for this string
         value; takes precedence over the 'binary' short-hand.

        :param ascii: Defaults to False; short-hand for the ``latin1``
         character set, generates ASCII in schema.

        :param unicode: Defaults to False; short-hand for the ``ucs2``
         character set, generates UNICODE in schema.

        :param national: Optional.  If true, use the server's configured
         national character set.

        :param binary: Defaults to False; short-hand to pick the binary
         collation that matches the column's character set, generating
         BINARY in schema.  Affects only the collation of character
         data, not the type of data stored.
        """
        super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
    """MySQL CHAR type, for fixed-length character data."""

    __visit_name__ = 'CHAR'

    def __init__(self, length=None, **kwargs):
        """Construct a CHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set.  This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation.  Must be
          compatible with the national character set.
        """
        super(CHAR, self).__init__(length=length, **kwargs)

    @classmethod
    def _adapt_string_for_cast(cls, type_):
        # Copy the given string type into a CHAR for the purposes of
        # rendering a CAST expression.
        # (fix: the first parameter of a classmethod is conventionally
        # named ``cls``, not ``self`` -- it receives the class object.)
        type_ = sqltypes.to_instance(type_)
        if isinstance(type_, sqltypes.CHAR):
            return type_
        elif isinstance(type_, _StringType):
            # carry over the MySQL-specific string attributes
            return CHAR(
                length=type_.length,
                charset=type_.charset,
                collation=type_.collation,
                ascii=type_.ascii,
                binary=type_.binary,
                unicode=type_.unicode,
                national=False  # not supported in CAST
            )
        else:
            # generic string type: only the length carries over
            return CHAR(length=type_.length)
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
    """MySQL NVARCHAR type.

    For variable-length character data in the server's configured
    national character set.
    """

    __visit_name__ = 'NVARCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct an NVARCHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional; use the default binary collation for the
         national character set.  This does not affect the type of data
         stored; use a BINARY type for binary data.

        :param collation: Optional; request a particular collation, which
         must be compatible with the national character set.
        """
        # NVARCHAR is VARCHAR with the NATIONAL modifier forced on.
        kwargs['national'] = True
        super(NVARCHAR, self).__init__(length=length, **kwargs)
class NCHAR(_StringType, sqltypes.NCHAR):
    """MySQL NCHAR type.

    For fixed-length character data in the server's configured national
    character set.
    """

    __visit_name__ = 'NCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct an NCHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional; use the default binary collation for the
         national character set.  This does not affect the type of data
         stored; use a BINARY type for binary data.

        :param collation: Optional; request a particular collation, which
         must be compatible with the national character set.
        """
        # NCHAR is CHAR with the NATIONAL modifier forced on.
        kwargs['national'] = True
        super(NCHAR, self).__init__(length=length, **kwargs)
class TINYBLOB(sqltypes._Binary):
    """MySQL TINYBLOB type, holding binary data up to 2^8 bytes."""

    __visit_name__ = 'TINYBLOB'
class MEDIUMBLOB(sqltypes._Binary):
    """MySQL MEDIUMBLOB type, holding binary data up to 2^24 bytes."""

    __visit_name__ = 'MEDIUMBLOB'
class LONGBLOB(sqltypes._Binary):
    """MySQL LONGBLOB type, holding binary data up to 2^32 bytes."""

    __visit_name__ = 'LONGBLOB'
class _EnumeratedValues(_StringType):
    """Mixin for ENUM and SET: parses the supplied value list and
    handles optional surrounding quote characters."""
    def _init_values(self, values, kw):
        # Determine quoting mode ('auto' detects it from the values),
        # strip quotes if the values arrive pre-quoted, and return the
        # normalized values plus the length of the longest one.
        self.quoting = kw.pop('quoting', 'auto')
        if self.quoting == 'auto' and len(values):
            # What quoting character are we using?
            q = None
            for e in values:
                if len(e) == 0:
                    # empty string can never be quoted
                    self.quoting = 'unquoted'
                    break
                elif q is None:
                    # first value fixes the candidate quote character
                    q = e[0]
                if len(e) == 1 or e[0] != q or e[-1] != q:
                    # not wrapped in the same char on both ends
                    self.quoting = 'unquoted'
                    break
            else:
                # loop completed without break: all values quoted with q
                self.quoting = 'quoted'
        if self.quoting == 'quoted':
            util.warn_deprecated(
                'Manually quoting %s value literals is deprecated. Supply '
                'unquoted values and use the quoting= option in cases of '
                'ambiguity.' % self.__class__.__name__)
            values = self._strip_values(values)
        self._enumerated_values = values
        # '+ [0]' guards max() against an empty value list
        length = max([len(v) for v in values] + [0])
        return values, length
    @classmethod
    def _strip_values(cls, values):
        # Remove enclosing quotes and collapse doubled quote chars
        # (the SQL-style escape for a quote inside a quoted literal).
        strip_values = []
        for a in values:
            if a[0:1] == '"' or a[0:1] == "'":
                # strip enclosing quotes and unquote interior
                a = a[1:-1].replace(a[0] * 2, a[0])
            strip_values.append(a)
        return strip_values
class ENUM(sqltypes.Enum, _EnumeratedValues):
    """MySQL ENUM type."""
    __visit_name__ = 'ENUM'
    def __init__(self, *enums, **kw):
        """Construct an ENUM.

        E.g.::

            Column('myenum', ENUM("foo", "bar", "baz"))

        :param enums: The range of valid values for this ENUM.  Values
         will be quoted when generating the schema according to the
         quoting flag (see below).

        :param strict: Defaults to False: ensure that a given value is in
         this ENUM's range of permissible values when inserting or
         updating rows.  Note that MySQL will not raise a fatal error if
         you attempt to store an out of range value - an alternate value
         will be stored instead.  (See MySQL ENUM documentation.)

        :param charset: Optional, a column-level character set for this
         string value.  Takes precedence to 'ascii' or 'unicode'
         short-hand.

        :param collation: Optional, a column-level collation for this
         string value.  Takes precedence to 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
         character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
         character set, generates UNICODE in schema.

        :param binary: Defaults to False: short-hand, pick the binary
         collation type that matches the column's character set.
         Generates BINARY in schema.  This does not affect the type of
         data stored, only the collation of character data.

        :param quoting: Defaults to 'auto': automatically determine enum
         value quoting.  If all enum values are surrounded by the same
         quoting character, then use 'quoted' mode.  Otherwise, use
         'unquoted' mode.

         'quoted': values in enums are already quoted, they will be used
         directly when generating the schema - this usage is deprecated.

         'unquoted': values in enums are not quoted, they will be escaped
         and surrounded by single quotes when generating the schema.

         Previous versions of this type always required manually quoted
         values to be supplied; future versions will always quote the
         string literals for you.  This is a transitional option.
        """
        values, length = self._init_values(enums, kw)
        self.strict = kw.pop('strict', False)
        # Discard keyword arguments accepted by the generic
        # sqltypes.Enum that have no meaning for the MySQL-native
        # implementation, so they don't reach _StringType.__init__().
        kw.pop('metadata', None)
        kw.pop('schema', None)
        kw.pop('name', None)
        kw.pop('quote', None)
        kw.pop('native_enum', None)
        kw.pop('inherit_schema', None)
        kw.pop('_create_events', None)
        # Initialize both bases explicitly: their __init__ signatures
        # differ, so cooperative super() is not used here.
        _StringType.__init__(self, length=length, **kw)
        sqltypes.Enum.__init__(self, *values)
    def __repr__(self):
        return util.generic_repr(
            self, to_inspect=[ENUM, _StringType, sqltypes.Enum])
    def bind_processor(self, dialect):
        super_convert = super(ENUM, self).bind_processor(dialect)
        def process(value):
            # With strict=True, reject values outside the enum's range
            # before they reach the server.
            if self.strict and value is not None and value not in self.enums:
                raise exc.InvalidRequestError('"%s" not a valid value for '
                                              'this enum' % value)
            if super_convert:
                return super_convert(value)
            else:
                return value
        return process
    def adapt(self, cls, **kw):
        # Propagate the MySQL-only 'strict' flag when adapting to
        # another ENUM; generic Enum targets don't accept it.
        if issubclass(cls, ENUM):
            kw['strict'] = self.strict
        return sqltypes.Enum.adapt(self, cls, **kw)
class SET(_EnumeratedValues):
    """MySQL SET type."""
    __visit_name__ = 'SET'
    def __init__(self, *values, **kw):
        """Construct a SET.

        E.g.::

            Column('myset', SET("foo", "bar", "baz"))

        The list of potential values is required in the case that this
        set will be used to generate DDL for a table, or if the
        :paramref:`.SET.retrieve_as_bitwise` flag is set to True.

        :param values: The range of valid values for this SET.

        :param convert_unicode: Same flag as that of
         :paramref:`.String.convert_unicode`.

        :param collation: same as that of :paramref:`.String.collation`

        :param charset: same as that of :paramref:`.VARCHAR.charset`.

        :param ascii: same as that of :paramref:`.VARCHAR.ascii`.

        :param unicode: same as that of :paramref:`.VARCHAR.unicode`.

        :param binary: same as that of :paramref:`.VARCHAR.binary`.

        :param quoting: Defaults to 'auto': automatically determine set
         value quoting.  If all values are surrounded by the same quoting
         character, then use 'quoted' mode.  Otherwise, use 'unquoted'
         mode.

         'quoted': values in enums are already quoted, they will be used
         directly when generating the schema - this usage is deprecated.

         'unquoted': values in enums are not quoted, they will be escaped
         and surrounded by single quotes when generating the schema.

         Previous versions of this type always required manually quoted
         values to be supplied; future versions will always quote the
         string literals for you.  This is a transitional option.

         .. versionadded:: 0.9.0

        :param retrieve_as_bitwise: if True, the data for the set type
         will be persisted and selected using an integer value, where a
         set is coerced into a bitwise mask for persistence.  MySQL
         allows this mode which has the advantage of being able to store
         values unambiguously, such as the blank string ``''``.  The
         datatype will appear as the expression ``col + 0`` in a SELECT
         statement, so that the value is coerced into an integer value in
         result sets.  This flag is required if one wishes to persist a
         set that can store the blank string ``''`` as a value.

         .. warning::

            When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
            essential that the list of set values is expressed in the
            **exact same order** as exists on the MySQL database.

         .. versionadded:: 1.0.0
        """
        self.retrieve_as_bitwise = kw.pop('retrieve_as_bitwise', False)
        values, length = self._init_values(values, kw)
        self.values = tuple(values)
        # '' cannot be distinguished from "no value" in string mode
        if not self.retrieve_as_bitwise and '' in values:
            raise exc.ArgumentError(
                "Can't use the blank value '' in a SET without "
                "setting retrieve_as_bitwise=True")
        if self.retrieve_as_bitwise:
            # Bidirectional map: value -> its bit (2**index) and
            # bit -> value, used by both bind and result processing.
            self._bitmap = dict(
                (value, 2 ** idx)
                for idx, value in enumerate(self.values)
            )
            self._bitmap.update(
                (2 ** idx, value)
                for idx, value in enumerate(self.values)
            )
        kw.setdefault('length', length)
        super(SET, self).__init__(**kw)
    def column_expression(self, colexpr):
        if self.retrieve_as_bitwise:
            # Render the column as "col + 0" so MySQL returns the set's
            # integer bitmask instead of its string form.
            return sql.type_coerce(
                sql.type_coerce(colexpr, sqltypes.Integer) + 0,
                self
            )
        else:
            return colexpr
    def result_processor(self, dialect, coltype):
        if self.retrieve_as_bitwise:
            def process(value):
                if value is not None:
                    value = int(value)
                    # Expand the integer bitmask back into a set of
                    # member values via the bit -> value mapping.
                    return set(
                        util.map_bits(self._bitmap.__getitem__, value)
                    )
                else:
                    return None
        else:
            super_convert = super(SET, self).result_processor(dialect, coltype)
            def process(value):
                if isinstance(value, util.string_types):
                    # MySQLdb returns a string, let's parse
                    if super_convert:
                        value = super_convert(value)
                    return set(re.findall(r'[^,]+', value))
                else:
                    # mysql-connector-python does a naive
                    # split(",") which throws in an empty string
                    if value is not None:
                        value.discard('')
                    return value
        return process
    def bind_processor(self, dialect):
        super_convert = super(SET, self).bind_processor(dialect)
        if self.retrieve_as_bitwise:
            def process(value):
                if value is None:
                    return None
                elif isinstance(value, util.int_types + util.string_types):
                    # pre-computed bitmask or raw string: pass through
                    if super_convert:
                        return super_convert(value)
                    else:
                        return value
                else:
                    # fold an iterable of member values into a bitmask
                    int_value = 0
                    for v in value:
                        int_value |= self._bitmap[v]
                    return int_value
        else:
            def process(value):
                # accept strings and int (actually bitflag) values directly
                if value is not None and not isinstance(
                        value, util.int_types + util.string_types):
                    value = ",".join(value)
                if super_convert:
                    return super_convert(value)
                else:
                    return value
        return process
    def adapt(self, impltype, **kw):
        # retrieve_as_bitwise must survive type adaptation
        kw['retrieve_as_bitwise'] = self.retrieve_as_bitwise
        return util.constructor_copy(
            self, impltype,
            *self.values,
            **kw
        )
# Old names: synonyms retained for backwards compatibility with the
# legacy "MS"-prefixed ("MySQL-Specific") naming; new code should use
# the uppercase type names defined above.
MSTime = TIME
MSSet = SET
MSEnum = ENUM
MSLongBlob = LONGBLOB
MSMediumBlob = MEDIUMBLOB
MSTinyBlob = TINYBLOB
MSBlob = BLOB
MSBinary = BINARY
MSVarBinary = VARBINARY
MSNChar = NCHAR
MSNVarChar = NVARCHAR
MSChar = CHAR
MSString = VARCHAR
MSLongText = LONGTEXT
MSMediumText = MEDIUMTEXT
MSTinyText = TINYTEXT
MSText = TEXT
MSYear = YEAR
MSTimeStamp = TIMESTAMP
MSBit = BIT
MSSmallInteger = SMALLINT
MSTinyInteger = TINYINT
MSMediumInteger = MEDIUMINT
MSBigInteger = BIGINT
MSNumeric = NUMERIC
MSDecimal = DECIMAL
MSDouble = DOUBLE
MSReal = REAL
MSFloat = FLOAT
MSInteger = INTEGER
# Map generic SQLAlchemy types to the MySQL-specific implementations
# the dialect substitutes for them at compile time.
colspecs = {
    _IntegerType: _IntegerType,
    _NumericType: _NumericType,
    _FloatType: _FloatType,
    sqltypes.Numeric: NUMERIC,
    sqltypes.Float: FLOAT,
    sqltypes.Time: TIME,
    sqltypes.Enum: ENUM,
    sqltypes.MatchType: _MatchType
}
# Reflection lookup: server-reported type name -> dialect type class.
# Everything 3.23 through 5.1 excepting OpenGIS types.
ischema_names = {
    'bigint': BIGINT,
    'binary': BINARY,
    'bit': BIT,
    'blob': BLOB,
    'boolean': BOOLEAN,
    'char': CHAR,
    'date': DATE,
    'datetime': DATETIME,
    'decimal': DECIMAL,
    'double': DOUBLE,
    'enum': ENUM,
    'fixed': DECIMAL,
    'float': FLOAT,
    'int': INTEGER,
    'integer': INTEGER,
    'longblob': LONGBLOB,
    'longtext': LONGTEXT,
    'mediumblob': MEDIUMBLOB,
    'mediumint': MEDIUMINT,
    'mediumtext': MEDIUMTEXT,
    'nchar': NCHAR,
    'nvarchar': NVARCHAR,
    'numeric': NUMERIC,
    'set': SET,
    'smallint': SMALLINT,
    'text': TEXT,
    'time': TIME,
    'timestamp': TIMESTAMP,
    'tinyblob': TINYBLOB,
    'tinyint': TINYINT,
    'tinytext': TINYTEXT,
    'varbinary': VARBINARY,
    'varchar': VARCHAR,
    'year': YEAR,
}
class MySQLExecutionContext(default.DefaultExecutionContext):
    def should_autocommit_text(self, statement):
        # Autocommit textual statements that match the module-level
        # AUTOCOMMIT_RE pattern (defined elsewhere in this module).
        return AUTOCOMMIT_RE.match(statement)
class MySQLCompiler(compiler.SQLCompiler):
    # Statement compiler producing MySQL-flavored SQL.
    render_table_with_column_in_update_from = True
    """Overridden from base SQLCompiler value"""
    # MySQL's EXTRACT spells the milliseconds field "millisecond".
    extract_map = compiler.SQLCompiler.extract_map.copy()
    extract_map.update({'milliseconds': 'millisecond'})
    def visit_random_func(self, fn, **kw):
        # random() renders as MySQL's RAND()
        return "rand%s" % self.function_argspec(fn)
    def visit_utc_timestamp_func(self, fn, **kw):
        return "UTC_TIMESTAMP"
    def visit_sysdate_func(self, fn, **kw):
        return "SYSDATE()"
    def visit_concat_op_binary(self, binary, operator, **kw):
        # MySQL concatenates with the CONCAT() function, not ||
        return "concat(%s, %s)" % (self.process(binary.left),
                                   self.process(binary.right))
    def visit_match_op_binary(self, binary, operator, **kw):
        # full-text search, always rendered in boolean mode
        return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % \
            (self.process(binary.left), self.process(binary.right))
    def get_from_hint_text(self, table, text):
        # index hints are emitted verbatim after the table name
        return text
    def visit_typeclause(self, typeclause, type_=None):
        # Render the target type for a CAST expression; returns None for
        # types MySQL cannot CAST to (visit_cast then skips the CAST).
        if type_ is None:
            type_ = typeclause.type.dialect_impl(self.dialect)
        if isinstance(type_, sqltypes.TypeDecorator):
            # unwrap the decorator and recurse on its implementation
            return self.visit_typeclause(typeclause, type_.impl)
        elif isinstance(type_, sqltypes.Integer):
            if getattr(type_, 'unsigned', False):
                return 'UNSIGNED INTEGER'
            else:
                return 'SIGNED INTEGER'
        elif isinstance(type_, sqltypes.TIMESTAMP):
            return 'DATETIME'
        elif isinstance(type_, (sqltypes.DECIMAL, sqltypes.DateTime,
                                sqltypes.Date, sqltypes.Time)):
            return self.dialect.type_compiler.process(type_)
        elif isinstance(type_, sqltypes.String) \
                and not isinstance(type_, (ENUM, SET)):
            # MySQL CAST accepts only CHAR among the character types
            adapted = CHAR._adapt_string_for_cast(type_)
            return self.dialect.type_compiler.process(adapted)
        elif isinstance(type_, sqltypes._Binary):
            return 'BINARY'
        elif isinstance(type_, sqltypes.NUMERIC):
            # CAST target is spelled DECIMAL, not NUMERIC
            return self.dialect.type_compiler.process(
                type_).replace('NUMERIC', 'DECIMAL')
        else:
            return None
    def visit_cast(self, cast, **kwargs):
        # No cast until 4, no decimals until 5.
        if not self.dialect._supports_cast:
            util.warn(
                "Current MySQL version does not support "
                "CAST; the CAST will be skipped.")
            return self.process(cast.clause.self_group())
        type_ = self.process(cast.typeclause)
        if type_ is None:
            # target type not castable on MySQL; render operand bare
            util.warn(
                "Datatype %s does not support CAST on MySQL; "
                "the CAST will be skipped." %
                self.dialect.type_compiler.process(cast.typeclause.type))
            return self.process(cast.clause.self_group())
        return 'CAST(%s AS %s)' % (self.process(cast.clause), type_)
    def render_literal_value(self, value, type_):
        # double backslashes when the server treats backslash as an
        # escape character inside string literals
        value = super(MySQLCompiler, self).render_literal_value(value, type_)
        if self.dialect._backslash_escapes:
            value = value.replace('\\', '\\\\')
        return value
    # override native_boolean=False behavior here, as
    # MySQL still supports native boolean
    def visit_true(self, element, **kw):
        return "true"
    def visit_false(self, element, **kw):
        return "false"
    def get_select_precolumns(self, select, **kw):
        """Add special MySQL keywords in place of DISTINCT.

        .. note::

            this usage is deprecated. :meth:`.Select.prefix_with`
            should be used for special keywords at the start
            of a SELECT.
        """
        if isinstance(select._distinct, util.string_types):
            # e.g. distinct='distinctrow' renders "DISTINCTROW "
            return select._distinct.upper() + " "
        elif select._distinct:
            return "DISTINCT "
        else:
            return ""
    def visit_join(self, join, asfrom=False, **kwargs):
        return ''.join(
            (self.process(join.left, asfrom=True, **kwargs),
             (join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "),
             self.process(join.right, asfrom=True, **kwargs),
             " ON ",
             self.process(join.onclause, **kwargs)))
    def for_update_clause(self, select, **kw):
        # MySQL's read-lock syntax is LOCK IN SHARE MODE
        if select._for_update_arg.read:
            return " LOCK IN SHARE MODE"
        else:
            return " FOR UPDATE"
    def limit_clause(self, select, **kw):
        # MySQL supports:
        #    LIMIT <limit>
        #    LIMIT <offset>, <limit>
        # and in server versions > 3.3:
        #    LIMIT <limit> OFFSET <offset>
        # The latter is more readable for offsets but we're stuck with the
        # former until we can refine dialects by server revision.
        limit_clause, offset_clause = select._limit_clause, \
            select._offset_clause
        if limit_clause is None and offset_clause is None:
            return ''
        elif offset_clause is not None:
            # As suggested by the MySQL docs, need to apply an
            # artificial limit if one wasn't provided
            # http://dev.mysql.com/doc/refman/5.0/en/select.html
            if limit_clause is None:
                # hardwire the upper limit. Currently
                # needed by OurSQL with Python 3
                # (https://bugs.launchpad.net/oursql/+bug/686232),
                # but also is consistent with the usage of the upper
                # bound as part of MySQL's "syntax" for OFFSET with
                # no LIMIT
                return ' \n LIMIT %s, %s' % (
                    self.process(offset_clause, **kw),
                    "18446744073709551615")
            else:
                return ' \n LIMIT %s, %s' % (
                    self.process(offset_clause, **kw),
                    self.process(limit_clause, **kw))
        else:
            # No offset provided, so just use the limit
            return ' \n LIMIT %s' % (self.process(limit_clause, **kw),)
    def update_limit_clause(self, update_stmt):
        # support UPDATE ... LIMIT n via the mysql_limit keyword
        limit = update_stmt.kwargs.get('%s_limit' % self.dialect.name, None)
        if limit:
            return "LIMIT %s" % limit
        else:
            return None
    def update_tables_clause(self, update_stmt, from_table,
                             extra_froms, **kw):
        # multi-table UPDATE: every table is named in the UPDATE clause
        return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
                         for t in [from_table] + list(extra_froms))
    def update_from_clause(self, update_stmt, from_table,
                           extra_froms, from_hints, **kw):
        # no separate FROM clause; tables were already rendered by
        # update_tables_clause()
        return None
class MySQLDDLCompiler(compiler.DDLCompiler):
    def get_column_specification(self, column, **kw):
        """Builds column DDL."""
        colspec = [
            self.preparer.format_column(column),
            self.dialect.type_compiler.process(
                column.type, type_expression=column)
        ]
        is_timestamp = isinstance(column.type, sqltypes.TIMESTAMP)
        if not column.nullable:
            colspec.append('NOT NULL')
        # TIMESTAMP columns need an explicit NULL, otherwise MySQL
        # applies its implicit NOT NULL / auto-update semantics.
        # see: http://docs.sqlalchemy.org/en/latest/dialects/
        # mysql.html#mysql_timestamp_null
        elif column.nullable and is_timestamp:
            colspec.append('NULL')
        default = self.get_column_default_string(column)
        if default is not None:
            colspec.append('DEFAULT ' + default)
        if column.table is not None \
                and column is column.table._autoincrement_column and \
                column.server_default is None:
            colspec.append('AUTO_INCREMENT')
        return ' '.join(colspec)
    def post_create_table(self, table):
        """Build table-level CREATE options like ENGINE and COLLATE."""
        table_opts = []
        # collect mysql_* table kwargs, upper-cased with prefix stripped
        opts = dict(
            (
                k[len(self.dialect.name) + 1:].upper(),
                v
            )
            for k, v in table.kwargs.items()
            if k.startswith('%s_' % self.dialect.name)
        )
        # emit options in dependency order, e.g. CHARSET before COLLATE
        for opt in topological.sort([
            ('DEFAULT_CHARSET', 'COLLATE'),
            ('DEFAULT_CHARACTER_SET', 'COLLATE'),
            ('PARTITION_BY', 'PARTITIONS'),  # only for test consistency
        ], opts):
            arg = opts[opt]
            if opt in _options_of_type_string:
                # quote string-valued options, escaping \ and '
                arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
            if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
                       'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
                       'DEFAULT_CHARSET',
                       'DEFAULT_COLLATE', 'PARTITION_BY'):
                opt = opt.replace('_', ' ')
            joiner = '='
            if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
                       'CHARACTER SET', 'COLLATE',
                       'PARTITION BY', 'PARTITIONS'):
                # these options are joined with a space, not '='
                joiner = ' '
            table_opts.append(joiner.join((opt, arg)))
        return ' '.join(table_opts)
    def visit_create_index(self, create):
        index = create.element
        self._verify_index_table(index)
        preparer = self.preparer
        table = preparer.format_table(index.table)
        columns = [self.sql_compiler.process(expr, include_table=False,
                                             literal_binds=True)
                   for expr in index.expressions]
        name = self._prepared_index_name(index)
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "
        text += "INDEX %s ON %s " % (name, table)
        # mysql_length: index prefix length(s); either one int for all
        # columns or a per-column mapping
        length = index.dialect_options['mysql']['length']
        if length is not None:
            if isinstance(length, dict):
                # length value can be a (column_name --> integer value)
                # mapping specifying the prefix length for each column of the
                # index
                columns = ', '.join(
                    '%s(%d)' % (expr, length[col.name]) if col.name in length
                    else
                    (
                        '%s(%d)' % (expr, length[expr]) if expr in length
                        else '%s' % expr
                    )
                    for col, expr in zip(index.expressions, columns)
                )
            else:
                # or can be an integer value specifying the same
                # prefix length for all columns of the index
                columns = ', '.join(
                    '%s(%d)' % (col, length)
                    for col in columns
                )
        else:
            columns = ', '.join(columns)
        text += '(%s)' % columns
        # mysql_using: index storage type, rendered as USING <type>
        using = index.dialect_options['mysql']['using']
        if using is not None:
            text += " USING %s" % (preparer.quote(using))
        return text
    def visit_primary_key_constraint(self, constraint):
        text = super(MySQLDDLCompiler, self).\
            visit_primary_key_constraint(constraint)
        # append USING <type> when mysql_using was supplied
        using = constraint.dialect_options['mysql']['using']
        if using:
            text += " USING %s" % (self.preparer.quote(using))
        return text
    def visit_drop_index(self, drop):
        index = drop.element
        # MySQL requires DROP INDEX name ON table, without a schema
        # qualifier on the index name itself
        return "\nDROP INDEX %s ON %s" % (
            self._prepared_index_name(index,
                                      include_schema=False),
            self.preparer.format_table(index.table))
    def visit_drop_constraint(self, drop):
        constraint = drop.element
        # each constraint flavor has its own ALTER TABLE ... DROP syntax
        if isinstance(constraint, sa_schema.ForeignKeyConstraint):
            qual = "FOREIGN KEY "
            const = self.preparer.format_constraint(constraint)
        elif isinstance(constraint, sa_schema.PrimaryKeyConstraint):
            # the primary key is anonymous on MySQL
            qual = "PRIMARY KEY "
            const = ""
        elif isinstance(constraint, sa_schema.UniqueConstraint):
            # unique constraints are dropped as indexes
            qual = "INDEX "
            const = self.preparer.format_constraint(constraint)
        else:
            qual = ""
            const = self.preparer.format_constraint(constraint)
        return "ALTER TABLE %s DROP %s%s" % \
            (self.preparer.format_table(constraint.table),
             qual, const)
    def define_constraint_match(self, constraint):
        # reject MATCH on foreign keys: MySQL parses it but then
        # silently ignores ON UPDATE/ON DELETE
        if constraint.match is not None:
            raise exc.CompileError(
                "MySQL ignores the 'MATCH' keyword while at the same time "
                "causes ON UPDATE/ON DELETE clauses to be ignored.")
        return ""
class MySQLTypeCompiler(compiler.GenericTypeCompiler):
    # Renders column-type DDL for MySQL, layering dialect-specific
    # attributes (UNSIGNED/ZEROFILL, CHARACTER SET/COLLATE, integer
    # display widths, fractional-seconds precision) onto the generic
    # type names.
    def _extend_numeric(self, type_, spec):
        "Extend a numeric-type declaration with MySQL specific extensions."
        if not self._mysql_type(type_):
            # Plain SQLAlchemy types carry none of the MySQL-only flags.
            return spec
        if type_.unsigned:
            spec += ' UNSIGNED'
        if type_.zerofill:
            spec += ' ZEROFILL'
        return spec
    def _extend_string(self, type_, defaults, spec):
        """Extend a string-type declaration with standard SQL CHARACTER SET /
        COLLATE annotations and MySQL specific extensions.
        """
        def attr(name):
            # An attribute set on the type wins; otherwise fall back to
            # the caller-supplied defaults dict.
            return getattr(type_, name, defaults.get(name))
        if attr('charset'):
            charset = 'CHARACTER SET %s' % attr('charset')
        elif attr('ascii'):
            charset = 'ASCII'
        elif attr('unicode'):
            charset = 'UNICODE'
        else:
            charset = None
        if attr('collation'):
            collation = 'COLLATE %s' % type_.collation
        elif attr('binary'):
            collation = 'BINARY'
        else:
            collation = None
        if attr('national'):
            # NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
            return ' '.join([c for c in ('NATIONAL', spec, collation)
                             if c is not None])
        return ' '.join([c for c in (spec, charset, collation)
                         if c is not None])
    def _mysql_type(self, type_):
        # True only for the MySQL-flavored type classes defined in this
        # module, which carry the extension attributes used above.
        return isinstance(type_, (_StringType, _NumericType))
    def visit_NUMERIC(self, type_, **kw):
        # Precision/scale are optional in MySQL; emit only what is set.
        if type_.precision is None:
            return self._extend_numeric(type_, "NUMERIC")
        elif type_.scale is None:
            return self._extend_numeric(type_,
                                        "NUMERIC(%(precision)s)" %
                                        {'precision': type_.precision})
        else:
            return self._extend_numeric(type_,
                                        "NUMERIC(%(precision)s, %(scale)s)" %
                                        {'precision': type_.precision,
                                         'scale': type_.scale})
    def visit_DECIMAL(self, type_, **kw):
        if type_.precision is None:
            return self._extend_numeric(type_, "DECIMAL")
        elif type_.scale is None:
            return self._extend_numeric(type_,
                                        "DECIMAL(%(precision)s)" %
                                        {'precision': type_.precision})
        else:
            return self._extend_numeric(type_,
                                        "DECIMAL(%(precision)s, %(scale)s)" %
                                        {'precision': type_.precision,
                                         'scale': type_.scale})
    def visit_DOUBLE(self, type_, **kw):
        # DOUBLE takes (precision, scale) only as a pair.
        if type_.precision is not None and type_.scale is not None:
            return self._extend_numeric(type_,
                                        "DOUBLE(%(precision)s, %(scale)s)" %
                                        {'precision': type_.precision,
                                         'scale': type_.scale})
        else:
            return self._extend_numeric(type_, 'DOUBLE')
    def visit_REAL(self, type_, **kw):
        # REAL, like DOUBLE, accepts (precision, scale) only as a pair.
        if type_.precision is not None and type_.scale is not None:
            return self._extend_numeric(type_,
                                        "REAL(%(precision)s, %(scale)s)" %
                                        {'precision': type_.precision,
                                         'scale': type_.scale})
        else:
            return self._extend_numeric(type_, 'REAL')
    def visit_FLOAT(self, type_, **kw):
        if self._mysql_type(type_) and \
                type_.scale is not None and \
                type_.precision is not None:
            return self._extend_numeric(
                type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale))
        elif type_.precision is not None:
            return self._extend_numeric(type_,
                                        "FLOAT(%s)" % (type_.precision,))
        else:
            return self._extend_numeric(type_, "FLOAT")
    def visit_INTEGER(self, type_, **kw):
        # display_width is a MySQL-only rendering hint, e.g. INTEGER(11).
        if self._mysql_type(type_) and type_.display_width is not None:
            return self._extend_numeric(
                type_, "INTEGER(%(display_width)s)" %
                {'display_width': type_.display_width})
        else:
            return self._extend_numeric(type_, "INTEGER")
    def visit_BIGINT(self, type_, **kw):
        if self._mysql_type(type_) and type_.display_width is not None:
            return self._extend_numeric(
                type_, "BIGINT(%(display_width)s)" %
                {'display_width': type_.display_width})
        else:
            return self._extend_numeric(type_, "BIGINT")
    def visit_MEDIUMINT(self, type_, **kw):
        if self._mysql_type(type_) and type_.display_width is not None:
            return self._extend_numeric(
                type_, "MEDIUMINT(%(display_width)s)" %
                {'display_width': type_.display_width})
        else:
            return self._extend_numeric(type_, "MEDIUMINT")
    def visit_TINYINT(self, type_, **kw):
        if self._mysql_type(type_) and type_.display_width is not None:
            return self._extend_numeric(type_,
                                        "TINYINT(%s)" % type_.display_width)
        else:
            return self._extend_numeric(type_, "TINYINT")
    def visit_SMALLINT(self, type_, **kw):
        if self._mysql_type(type_) and type_.display_width is not None:
            return self._extend_numeric(type_,
                                        "SMALLINT(%(display_width)s)" %
                                        {'display_width': type_.display_width}
                                        )
        else:
            return self._extend_numeric(type_, "SMALLINT")
    def visit_BIT(self, type_, **kw):
        if type_.length is not None:
            return "BIT(%s)" % type_.length
        else:
            return "BIT"
    def visit_DATETIME(self, type_, **kw):
        # fsp = fractional seconds precision, e.g. DATETIME(6).
        if getattr(type_, 'fsp', None):
            return "DATETIME(%d)" % type_.fsp
        else:
            return "DATETIME"
    def visit_DATE(self, type_, **kw):
        return "DATE"
    def visit_TIME(self, type_, **kw):
        if getattr(type_, 'fsp', None):
            return "TIME(%d)" % type_.fsp
        else:
            return "TIME"
    def visit_TIMESTAMP(self, type_, **kw):
        if getattr(type_, 'fsp', None):
            return "TIMESTAMP(%d)" % type_.fsp
        else:
            return "TIMESTAMP"
    def visit_YEAR(self, type_, **kw):
        if type_.display_width is None:
            return "YEAR"
        else:
            return "YEAR(%s)" % type_.display_width
    def visit_TEXT(self, type_, **kw):
        if type_.length:
            return self._extend_string(type_, {}, "TEXT(%d)" % type_.length)
        else:
            return self._extend_string(type_, {}, "TEXT")
    def visit_TINYTEXT(self, type_, **kw):
        return self._extend_string(type_, {}, "TINYTEXT")
    def visit_MEDIUMTEXT(self, type_, **kw):
        return self._extend_string(type_, {}, "MEDIUMTEXT")
    def visit_LONGTEXT(self, type_, **kw):
        return self._extend_string(type_, {}, "LONGTEXT")
    def visit_VARCHAR(self, type_, **kw):
        # Unlike CHAR, MySQL's VARCHAR has no default length.
        if type_.length:
            return self._extend_string(
                type_, {}, "VARCHAR(%d)" % type_.length)
        else:
            raise exc.CompileError(
                "VARCHAR requires a length on dialect %s" %
                self.dialect.name)
    def visit_CHAR(self, type_, **kw):
        if type_.length:
            return self._extend_string(type_, {}, "CHAR(%(length)s)" %
                                       {'length': type_.length})
        else:
            return self._extend_string(type_, {}, "CHAR")
    def visit_NVARCHAR(self, type_, **kw):
        # We'll actually generate the equiv. "NATIONAL VARCHAR" instead
        # of "NVARCHAR".
        if type_.length:
            return self._extend_string(
                type_, {'national': True},
                "VARCHAR(%(length)s)" % {'length': type_.length})
        else:
            raise exc.CompileError(
                "NVARCHAR requires a length on dialect %s" %
                self.dialect.name)
    def visit_NCHAR(self, type_, **kw):
        # We'll actually generate the equiv.
        # "NATIONAL CHAR" instead of "NCHAR".
        if type_.length:
            return self._extend_string(
                type_, {'national': True},
                "CHAR(%(length)s)" % {'length': type_.length})
        else:
            return self._extend_string(type_, {'national': True}, "CHAR")
    def visit_VARBINARY(self, type_, **kw):
        return "VARBINARY(%d)" % type_.length
    def visit_large_binary(self, type_, **kw):
        # Generic LargeBinary maps to BLOB on MySQL.
        return self.visit_BLOB(type_)
    def visit_enum(self, type_, **kw):
        # Native ENUM only when requested; otherwise the generic
        # VARCHAR-based emulation from the base compiler.
        if not type_.native_enum:
            return super(MySQLTypeCompiler, self).visit_enum(type_)
        else:
            return self._visit_enumerated_values("ENUM", type_, type_.enums)
    def visit_BLOB(self, type_, **kw):
        if type_.length:
            return "BLOB(%d)" % type_.length
        else:
            return "BLOB"
    def visit_TINYBLOB(self, type_, **kw):
        return "TINYBLOB"
    def visit_MEDIUMBLOB(self, type_, **kw):
        return "MEDIUMBLOB"
    def visit_LONGBLOB(self, type_, **kw):
        return "LONGBLOB"
    def _visit_enumerated_values(self, name, type_, enumerated_values):
        # Render ENUM('a','b') / SET('a','b'), doubling embedded quotes.
        quoted_enums = []
        for e in enumerated_values:
            quoted_enums.append("'%s'" % e.replace("'", "''"))
        return self._extend_string(type_, {}, "%s(%s)" % (
            name, ",".join(quoted_enums))
        )
    def visit_ENUM(self, type_, **kw):
        return self._visit_enumerated_values("ENUM", type_,
                                             type_._enumerated_values)
    def visit_SET(self, type_, **kw):
        return self._visit_enumerated_values("SET", type_,
                                             type_._enumerated_values)
    def visit_BOOLEAN(self, type, **kw):
        # MySQL has no true boolean type (see MySQLDialect
        # supports_native_boolean); BOOL is the accepted alias.
        return "BOOL"
class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
    """MySQL-specific identifier quoting.

    Backticks by default; ANSI double quotes when the server runs with
    the ANSI_QUOTES sql_mode enabled.
    """
    reserved_words = RESERVED_WORDS
    def __init__(self, dialect, server_ansiquotes=False, **kw):
        quote = '"' if server_ansiquotes else "`"
        super(MySQLIdentifierPreparer, self).__init__(
            dialect, initial_quote=quote, escape_quote=quote)
    def _quote_free_identifiers(self, *ids):
        """Unilaterally identifier-quote any number of strings."""
        return tuple(self.quote_identifier(i)
                     for i in ids if i is not None)
@log.class_logger
class MySQLDialect(default.DefaultDialect):
    """Details of the MySQL dialect.
    Not used directly in application code.
    """
    name = 'mysql'
    supports_alter = True
    # MySQL has no true "boolean" type; we
    # allow for the "true" and "false" keywords, however
    supports_native_boolean = False
    # identifiers are 64, however aliases can be 255...
    max_identifier_length = 255
    max_index_name_length = 64
    supports_native_enum = True
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = False
    supports_multivalues_insert = True
    default_paramstyle = 'format'
    colspecs = colspecs
    statement_compiler = MySQLCompiler
    ddl_compiler = MySQLDDLCompiler
    type_compiler = MySQLTypeCompiler
    ischema_names = ischema_names
    preparer = MySQLIdentifierPreparer
    # default SQL compilation settings -
    # these are modified upon initialize(),
    # i.e. first connect
    _backslash_escapes = True
    _server_ansiquotes = False
    construct_arguments = [
        (sa_schema.Table, {
            "*": None
        }),
        (sql.Update, {
            "limit": None
        }),
        (sa_schema.PrimaryKeyConstraint, {
            "using": None
        }),
        (sa_schema.Index, {
            "using": None,
            "length": None,
        })
    ]
    def __init__(self, isolation_level=None, **kwargs):
        kwargs.pop('use_ansiquotes', None)   # legacy
        default.DefaultDialect.__init__(self, **kwargs)
        self.isolation_level = isolation_level
    def on_connect(self):
        """Return a connect-event hook applying the configured isolation
        level, or None when none was requested."""
        if self.isolation_level is not None:
            def connect(conn):
                self.set_isolation_level(conn, self.isolation_level)
            return connect
        else:
            return None
    _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
                             'READ COMMITTED', 'REPEATABLE READ'])
    def set_isolation_level(self, connection, level):
        """Set the session transaction isolation level, validating
        against the levels MySQL supports."""
        level = level.replace('_', ' ')
        if level not in self._isolation_lookup:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s" %
                (level, self.name, ", ".join(self._isolation_lookup))
            )
        cursor = connection.cursor()
        cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level)
        cursor.execute("COMMIT")
        cursor.close()
    def get_isolation_level(self, connection):
        """Report the session isolation level, normalized to the
        underscore-free, space-separated form."""
        cursor = connection.cursor()
        cursor.execute('SELECT @@tx_isolation')
        val = cursor.fetchone()[0]
        cursor.close()
        if util.py3k and isinstance(val, bytes):
            val = val.decode()
        return val.upper().replace("-", " ")
    def do_commit(self, dbapi_connection):
        """Execute a COMMIT."""
        # COMMIT/ROLLBACK were introduced in 3.23.15.
        # Yes, we have at least one user who has to talk to these old
        # versions!
        #
        # Ignore commit/rollback if support isn't present, otherwise even
        # basic operations via autocommit fail.
        try:
            dbapi_connection.commit()
        except Exception:
            if self.server_version_info < (3, 23, 15):
                args = sys.exc_info()[1].args
                if args and args[0] == 1064:
                    return
            raise
    def do_rollback(self, dbapi_connection):
        """Execute a ROLLBACK."""
        try:
            dbapi_connection.rollback()
        except Exception:
            if self.server_version_info < (3, 23, 15):
                args = sys.exc_info()[1].args
                if args and args[0] == 1064:
                    return
            raise
    def do_begin_twophase(self, connection, xid):
        connection.execute(sql.text("XA BEGIN :xid"), xid=xid)
    def do_prepare_twophase(self, connection, xid):
        connection.execute(sql.text("XA END :xid"), xid=xid)
        connection.execute(sql.text("XA PREPARE :xid"), xid=xid)
    def do_rollback_twophase(self, connection, xid, is_prepared=True,
                             recover=False):
        if not is_prepared:
            connection.execute(sql.text("XA END :xid"), xid=xid)
        connection.execute(sql.text("XA ROLLBACK :xid"), xid=xid)
    def do_commit_twophase(self, connection, xid, is_prepared=True,
                           recover=False):
        if not is_prepared:
            self.do_prepare_twophase(connection, xid)
        connection.execute(sql.text("XA COMMIT :xid"), xid=xid)
    def do_recover_twophase(self, connection):
        resultset = connection.execute("XA RECOVER")
        return [row['data'][0:row['gtrid_length']] for row in resultset]
    def is_disconnect(self, e, connection, cursor):
        """Return True if the given DBAPI error indicates a dead
        connection rather than a statement-level failure."""
        if isinstance(e, (self.dbapi.OperationalError,
                          self.dbapi.ProgrammingError)):
            return self._extract_error_code(e) in \
                (2006, 2013, 2014, 2045, 2055)
        elif isinstance(e, self.dbapi.InterfaceError):
            # if underlying connection is closed,
            # this is the error you get
            return "(0, '')" in str(e)
        else:
            return False
    def _compat_fetchall(self, rp, charset=None):
        """Proxy result rows to smooth over MySQL-Python driver
        inconsistencies."""
        return [_DecodingRowProxy(row, charset) for row in rp.fetchall()]
    def _compat_fetchone(self, rp, charset=None):
        """Proxy a result row to smooth over MySQL-Python driver
        inconsistencies."""
        return _DecodingRowProxy(rp.fetchone(), charset)
    def _compat_first(self, rp, charset=None):
        """Proxy a result row to smooth over MySQL-Python driver
        inconsistencies."""
        return _DecodingRowProxy(rp.first(), charset)
    def _extract_error_code(self, exception):
        # Driver-specific; implemented by each DBAPI subdialect.
        raise NotImplementedError()
    def _get_default_schema_name(self, connection):
        return connection.execute('SELECT DATABASE()').scalar()
    def has_table(self, connection, table_name, schema=None):
        # SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly
        # on macosx (and maybe win?) with multibyte table names.
        #
        # TODO: if this is not a problem on win, make the strategy swappable
        # based on platform.  DESCRIBE is slower.
        # [ticket:726]
        # full_name = self.identifier_preparer.format_table(table,
        #                                                   use_schema=True)
        full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
            schema, table_name))
        st = "DESCRIBE %s" % full_name
        rs = None
        try:
            try:
                rs = connection.execution_options(
                    skip_user_error_events=True).execute(st)
                have = rs.fetchone() is not None
                rs.close()
                return have
            except exc.DBAPIError as e:
                # 1146 = table doesn't exist
                if self._extract_error_code(e.orig) == 1146:
                    return False
                raise
        finally:
            if rs:
                rs.close()
    def initialize(self, connection):
        self._connection_charset = self._detect_charset(connection)
        self._detect_ansiquotes(connection)
        if self._server_ansiquotes:
            # if ansiquotes == True, build a new IdentifierPreparer
            # with the new setting
            self.identifier_preparer = self.preparer(
                self, server_ansiquotes=self._server_ansiquotes)
        default.DefaultDialect.initialize(self, connection)
    @property
    def _supports_cast(self):
        # CAST() appeared in MySQL 4.0.2.
        return self.server_version_info is None or \
            self.server_version_info >= (4, 0, 2)
    @reflection.cache
    def get_schema_names(self, connection, **kw):
        rp = connection.execute("SHOW schemas")
        return [r[0] for r in rp]
    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """Return a Unicode SHOW TABLES from a given schema."""
        if schema is not None:
            current_schema = schema
        else:
            current_schema = self.default_schema_name
        charset = self._connection_charset
        if self.server_version_info < (5, 0, 2):
            # No table/view distinction before 5.0.2.
            rp = connection.execute(
                "SHOW TABLES FROM %s" %
                self.identifier_preparer.quote_identifier(current_schema))
            return [row[0] for
                    row in self._compat_fetchall(rp, charset=charset)]
        else:
            rp = connection.execute(
                "SHOW FULL TABLES FROM %s" %
                self.identifier_preparer.quote_identifier(current_schema))
            return [row[0]
                    for row in self._compat_fetchall(rp, charset=charset)
                    if row[1] == 'BASE TABLE']
    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        # Views (and SHOW FULL TABLES) require MySQL >= 5.0.2; the
        # previous fallback to get_table_names() after this raise was
        # unreachable and has been removed.
        if self.server_version_info < (5, 0, 2):
            raise NotImplementedError
        if schema is None:
            schema = self.default_schema_name
        charset = self._connection_charset
        rp = connection.execute(
            "SHOW FULL TABLES FROM %s" %
            self.identifier_preparer.quote_identifier(schema))
        return [row[0]
                for row in self._compat_fetchall(rp, charset=charset)
                if row[1] in ('VIEW', 'SYSTEM VIEW')]
    @reflection.cache
    def get_table_options(self, connection, table_name, schema=None, **kw):
        parsed_state = self._parsed_state_or_create(
            connection, table_name, schema, **kw)
        return parsed_state.table_options
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        parsed_state = self._parsed_state_or_create(
            connection, table_name, schema, **kw)
        return parsed_state.columns
    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        parsed_state = self._parsed_state_or_create(
            connection, table_name, schema, **kw)
        for key in parsed_state.keys:
            if key['type'] == 'PRIMARY':
                # There can be only one.
                cols = [s[0] for s in key['columns']]
                return {'constrained_columns': cols, 'name': None}
        return {'constrained_columns': [], 'name': None}
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        parsed_state = self._parsed_state_or_create(
            connection, table_name, schema, **kw)
        default_schema = None
        fkeys = []
        for spec in parsed_state.constraints:
            # only FOREIGN KEYs
            ref_name = spec['table'][-1]
            ref_schema = len(spec['table']) > 1 and \
                spec['table'][-2] or schema
            if not ref_schema:
                if default_schema is None:
                    default_schema = \
                        connection.dialect.default_schema_name
                if schema == default_schema:
                    ref_schema = schema
            loc_names = spec['local']
            ref_names = spec['foreign']
            con_kw = {}
            for opt in ('onupdate', 'ondelete'):
                if spec.get(opt, False):
                    con_kw[opt] = spec[opt]
            fkey_d = {
                'name': spec['name'],
                'constrained_columns': loc_names,
                'referred_schema': ref_schema,
                'referred_table': ref_name,
                'referred_columns': ref_names,
                'options': con_kw
            }
            fkeys.append(fkey_d)
        return fkeys
    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        parsed_state = self._parsed_state_or_create(
            connection, table_name, schema, **kw)
        indexes = []
        for spec in parsed_state.keys:
            unique = False
            flavor = spec['type']
            if flavor == 'PRIMARY':
                # Reported via get_pk_constraint(), not as an index.
                continue
            if flavor == 'UNIQUE':
                unique = True
            elif flavor in (None, 'FULLTEXT', 'SPATIAL'):
                pass
            else:
                self.logger.info(
                    "Converting unknown KEY type %s to a plain KEY", flavor)
                pass
            index_d = {}
            index_d['name'] = spec['name']
            index_d['column_names'] = [s[0] for s in spec['columns']]
            index_d['unique'] = unique
            if flavor:
                index_d['type'] = flavor
            indexes.append(index_d)
        return indexes
    @reflection.cache
    def get_unique_constraints(self, connection, table_name,
                               schema=None, **kw):
        parsed_state = self._parsed_state_or_create(
            connection, table_name, schema, **kw)
        return [
            {
                'name': key['name'],
                'column_names': [col[0] for col in key['columns']],
                'duplicates_index': key['name'],
            }
            for key in parsed_state.keys
            if key['type'] == 'UNIQUE'
        ]
    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        charset = self._connection_charset
        full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
            schema, view_name))
        sql = self._show_create_table(connection, None, charset,
                                      full_name=full_name)
        return sql
    def _parsed_state_or_create(self, connection, table_name,
                                schema=None, **kw):
        return self._setup_parser(
            connection,
            table_name,
            schema,
            info_cache=kw.get('info_cache', None)
        )
    @util.memoized_property
    def _tabledef_parser(self):
        """return the MySQLTableDefinitionParser, generate if needed.
        The deferred creation ensures that the dialect has
        retrieved server version information first.
        """
        if (self.server_version_info < (4, 1) and self._server_ansiquotes):
            # ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1
            preparer = self.preparer(self, server_ansiquotes=False)
        else:
            preparer = self.identifier_preparer
        return MySQLTableDefinitionParser(self, preparer)
    @reflection.cache
    def _setup_parser(self, connection, table_name, schema=None, **kw):
        charset = self._connection_charset
        parser = self._tabledef_parser
        full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
            schema, table_name))
        sql = self._show_create_table(connection, None, charset,
                                      full_name=full_name)
        if sql.startswith('CREATE ALGORITHM'):
            # Adapt views to something table-like.
            columns = self._describe_table(connection, None, charset,
                                           full_name=full_name)
            sql = parser._describe_to_create(table_name, columns)
        return parser.parse(sql, charset)
    def _detect_charset(self, connection):
        # Driver-specific; implemented by each DBAPI subdialect.
        raise NotImplementedError()
    def _detect_casing(self, connection):
        """Sniff out identifier case sensitivity.
        Cached per-connection. This value can not change without a server
        restart.
        """
        # http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html
        charset = self._connection_charset
        row = self._compat_first(connection.execute(
            "SHOW VARIABLES LIKE 'lower_case_table_names'"),
            charset=charset)
        if not row:
            cs = 0
        else:
            # 4.0.15 returns OFF or ON according to [ticket:489]
            # 3.23 doesn't, 4.0.27 doesn't..
            if row[1] == 'OFF':
                cs = 0
            elif row[1] == 'ON':
                cs = 1
            else:
                cs = int(row[1])
        return cs
    def _detect_collations(self, connection):
        """Pull the active COLLATIONS list from the server.
        Cached per-connection.
        """
        collations = {}
        if self.server_version_info < (4, 1, 0):
            # SHOW COLLATION appeared in 4.1.
            pass
        else:
            charset = self._connection_charset
            rs = connection.execute('SHOW COLLATION')
            for row in self._compat_fetchall(rs, charset):
                collations[row[0]] = row[1]
        return collations
    def _detect_ansiquotes(self, connection):
        """Detect and adjust for the ANSI_QUOTES sql mode."""
        row = self._compat_first(
            connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
            charset=self._connection_charset)
        if not row:
            mode = ''
        else:
            mode = row[1] or ''
            # 4.0
            if mode.isdigit():
                mode_no = int(mode)
                mode = (mode_no | 4 == mode_no) and 'ANSI_QUOTES' or ''
        self._server_ansiquotes = 'ANSI_QUOTES' in mode
        # as of MySQL 5.0.1
        self._backslash_escapes = 'NO_BACKSLASH_ESCAPES' not in mode
    def _show_create_table(self, connection, table, charset=None,
                           full_name=None):
        """Run SHOW CREATE TABLE for a ``Table``."""
        if full_name is None:
            full_name = self.identifier_preparer.format_table(table)
        st = "SHOW CREATE TABLE %s" % full_name
        rp = None
        try:
            rp = connection.execution_options(
                skip_user_error_events=True).execute(st)
        except exc.DBAPIError as e:
            if self._extract_error_code(e.orig) == 1146:
                raise exc.NoSuchTableError(full_name)
            else:
                raise
        row = self._compat_first(rp, charset=charset)
        if not row:
            raise exc.NoSuchTableError(full_name)
        # Column 1 of the single result row is the CREATE statement.
        # (An unreachable trailing "return sql" was removed here.)
        return row[1].strip()
    def _describe_table(self, connection, table, charset=None,
                        full_name=None):
        """Run DESCRIBE for a ``Table`` and return processed rows."""
        if full_name is None:
            full_name = self.identifier_preparer.format_table(table)
        st = "DESCRIBE %s" % full_name
        rp, rows = None, None
        try:
            try:
                rp = connection.execution_options(
                    skip_user_error_events=True).execute(st)
            except exc.DBAPIError as e:
                if self._extract_error_code(e.orig) == 1146:
                    raise exc.NoSuchTableError(full_name)
                else:
                    raise
            rows = self._compat_fetchall(rp, charset=charset)
        finally:
            if rp:
                rp.close()
        return rows
class ReflectedState(object):
    """Stores raw information about a SHOW CREATE TABLE statement."""
    def __init__(self):
        # Accumulators populated by MySQLTableDefinitionParser.parse().
        self.columns = []
        self.keys = []
        self.constraints = []
        self.table_name = None
        self.table_options = {}
@log.class_logger
class MySQLTableDefinitionParser(object):
    """Parses the results of a SHOW CREATE TABLE statement."""
    def __init__(self, dialect, preparer):
        self.dialect = dialect
        self.preparer = preparer
        self._prep_regexes()
    def parse(self, show_create, charset):
        """Parse full SHOW CREATE TABLE output into a ReflectedState."""
        state = ReflectedState()
        state.charset = charset
        for line in re.split(r'\r?\n', show_create):
            if line.startswith('  ' + self.preparer.initial_quote):
                self._parse_column(line, state)
            # a regular table options line
            elif line.startswith(') '):
                self._parse_table_options(line, state)
            # an ANSI-mode table options line
            elif line == ')':
                pass
            elif line.startswith('CREATE '):
                self._parse_table_name(line, state)
            # Not present in real reflection, but may be if
            # loading from a file.
            elif not line:
                pass
            else:
                type_, spec = self._parse_constraints(line)
                if type_ is None:
                    util.warn("Unknown schema content: %r" % line)
                elif type_ == 'key':
                    state.keys.append(spec)
                elif type_ == 'constraint':
                    state.constraints.append(spec)
                else:
                    pass
        return state
    def _parse_constraints(self, line):
        """Parse a KEY or CONSTRAINT line.
        :param line: A line of SHOW CREATE TABLE output
        """
        # KEY
        m = self._re_key.match(line)
        if m:
            spec = m.groupdict()
            # convert columns into name, length pairs
            spec['columns'] = self._parse_keyexprs(spec['columns'])
            return 'key', spec
        # CONSTRAINT
        m = self._re_constraint.match(line)
        if m:
            spec = m.groupdict()
            spec['table'] = \
                self.preparer.unformat_identifiers(spec['table'])
            spec['local'] = [c[0]
                             for c in self._parse_keyexprs(spec['local'])]
            spec['foreign'] = [c[0]
                               for c in self._parse_keyexprs(spec['foreign'])]
            return 'constraint', spec
        # PARTITION and SUBPARTITION
        m = self._re_partition.match(line)
        if m:
            # Punt!
            return 'partition', line
        # No match.
        return (None, line)
    def _parse_table_name(self, line, state):
        """Extract the table name.
        :param line: The first line of SHOW CREATE TABLE
        """
        regex, cleanup = self._pr_name
        m = regex.match(line)
        if m:
            state.table_name = cleanup(m.group('name'))
    def _parse_table_options(self, line, state):
        """Build a dictionary of all reflected table-level options.
        :param line: The final line of SHOW CREATE TABLE output.
        """
        options = {}
        if not line or line == ')':
            pass
        else:
            rest_of_line = line[:]
            for regex, cleanup in self._pr_options:
                m = regex.search(rest_of_line)
                if not m:
                    continue
                directive, value = m.group('directive'), m.group('val')
                if cleanup:
                    value = cleanup(value)
                options[directive.lower()] = value
                # Strip the matched option so later (word) patterns
                # can't mis-match inside quoted string values.
                rest_of_line = regex.sub('', rest_of_line)
        for nope in ('auto_increment', 'data directory', 'index directory'):
            options.pop(nope, None)
        for opt, val in options.items():
            state.table_options['%s_%s' % (self.dialect.name, opt)] = val
    def _parse_column(self, line, state):
        """Extract column details.
        Falls back to a 'minimal support' variant if full parse fails.
        :param line: Any column-bearing line from SHOW CREATE TABLE
        """
        spec = None
        m = self._re_column.match(line)
        if m:
            spec = m.groupdict()
            spec['full'] = True
        else:
            m = self._re_column_loose.match(line)
            if m:
                spec = m.groupdict()
                spec['full'] = False
        if not spec:
            util.warn("Unknown column definition %r" % line)
            return
        if not spec['full']:
            util.warn("Incomplete reflection of column definition %r" % line)
        name, type_, args = spec['name'], spec['coltype'], spec['arg']
        try:
            col_type = self.dialect.ischema_names[type_]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (type_, name))
            col_type = sqltypes.NullType
        # Column type positional arguments eg. varchar(32)
        if args is None or args == '':
            type_args = []
        elif args[0] == "'" and args[-1] == "'":
            type_args = self._re_csv_str.findall(args)
        else:
            type_args = [int(v) for v in self._re_csv_int.findall(args)]
        # Column type keyword options
        type_kw = {}
        for kw in ('unsigned', 'zerofill'):
            if spec.get(kw, False):
                type_kw[kw] = True
        for kw in ('charset', 'collate'):
            if spec.get(kw, False):
                type_kw[kw] = spec[kw]
        if issubclass(col_type, _EnumeratedValues):
            type_args = _EnumeratedValues._strip_values(type_args)
            if issubclass(col_type, SET) and '' in type_args:
                type_kw['retrieve_as_bitwise'] = True
        type_instance = col_type(*type_args, **type_kw)
        col_kw = {}
        # NOT NULL
        col_kw['nullable'] = True
        # this can be "NULL" in the case of TIMESTAMP
        if spec.get('notnull', False) == 'NOT NULL':
            col_kw['nullable'] = False
        # AUTO_INCREMENT
        if spec.get('autoincr', False):
            col_kw['autoincrement'] = True
        elif issubclass(col_type, sqltypes.Integer):
            col_kw['autoincrement'] = False
        # DEFAULT
        default = spec.get('default', None)
        if default == 'NULL':
            # eliminates the need to deal with this later.
            default = None
        col_d = dict(name=name, type=type_instance, default=default)
        col_d.update(col_kw)
        state.columns.append(col_d)
    def _describe_to_create(self, table_name, columns):
        """Re-format DESCRIBE output as a SHOW CREATE TABLE string.
        DESCRIBE is a much simpler reflection and is sufficient for
        reflecting views for runtime use. This method formats DDL
        for columns only- keys are omitted.
        :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
        SHOW FULL COLUMNS FROM rows must be rearranged for use with
        this function.
        """
        buffer = []
        for row in columns:
            (name, col_type, nullable, default, extra) = \
                [row[i] for i in (0, 1, 2, 4, 5)]
            line = [' ']
            line.append(self.preparer.quote_identifier(name))
            line.append(col_type)
            if not nullable:
                line.append('NOT NULL')
            if default:
                if 'auto_increment' in default:
                    pass
                elif (col_type.startswith('timestamp') and
                        default.startswith('C')):
                    line.append('DEFAULT')
                    line.append(default)
                elif default == 'NULL':
                    line.append('DEFAULT')
                    line.append(default)
                else:
                    line.append('DEFAULT')
                    line.append("'%s'" % default.replace("'", "''"))
            if extra:
                line.append(extra)
            buffer.append(' '.join(line))
        return ''.join([('CREATE TABLE %s (\n' %
                         self.preparer.quote_identifier(table_name)),
                        ',\n'.join(buffer),
                        '\n) '])
    def _parse_keyexprs(self, identifiers):
        """Unpack '"col"(2),"col" ASC'-ish strings into components."""
        return self._re_keyexprs.findall(identifiers)
    def _prep_regexes(self):
        """Pre-compile regular expressions."""
        self._re_columns = []
        self._pr_options = []
        _final = self.preparer.final_quote
        quotes = dict(zip(('iq', 'fq', 'esc_fq'),
                          [re.escape(s) for s in
                           (self.preparer.initial_quote,
                            _final,
                            self.preparer._escape_identifier(_final))]))
        self._pr_name = _pr_compile(
            r'^CREATE (?:\w+ +)?TABLE +'
            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
            self.preparer._unescape_identifier)
        # `col`,`col2`(32),`col3`(15) DESC
        #
        # Note: ASC and DESC aren't reflected, so we'll punt...
        self._re_keyexprs = _re_compile(
            r'(?:'
            r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
            r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
        # 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
        self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
        # 123 or 123,456
        self._re_csv_int = _re_compile(r'\d+')
        # `colname` <type> [type opts]
        #  (NOT NULL | NULL)
        #   DEFAULT ('value' | CURRENT_TIMESTAMP...)
        #   COMMENT 'comment'
        #  COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
        #  STORAGE (DISK|MEMORY)
        self._re_column = _re_compile(
            r'  '
            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
            r'(?P<coltype>\w+)'
            r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
            r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
            r'(?: +(?P<unsigned>UNSIGNED))?'
            r'(?: +(?P<zerofill>ZEROFILL))?'
            r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
            r'(?: +COLLATE +(?P<collate>[\w_]+))?'
            r'(?: +(?P<notnull>(?:NOT )?NULL))?'
            r'(?: +DEFAULT +(?P<default>'
            r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
            r'(?: +ON UPDATE \w+)?)'
            r'))?'
            r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
            # BUGFIX: was '(P<comment>...' — a group matching the
            # literal text "P<comment>", so COMMENT values were never
            # captured; '?P<comment>' makes it the intended named group.
            r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
            r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
            r'(?: +STORAGE +(?P<storage>\w+))?'
            r'(?: +(?P<extra>.*))?'
            r',?$'
            % quotes
        )
        # Fallback, try to parse as little as possible
        self._re_column_loose = _re_compile(
            r'  '
            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
            r'(?P<coltype>\w+)'
            r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
            r'.*?(?P<notnull>(?:NOT )NULL)?'
            % quotes
        )
        # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
        # (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
        # KEY_BLOCK_SIZE size | WITH PARSER name
        self._re_key = _re_compile(
            r'  '
            r'(?:(?P<type>\S+) )?KEY'
            r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
            r'(?: +USING +(?P<using_pre>\S+))?'
            r' +\((?P<columns>.+?)\)'
            r'(?: +USING +(?P<using_post>\S+))?'
            r'(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?'
            r'(?: +WITH PARSER +(?P<parser>\S+))?'
            r',?$'
            % quotes
        )
        # CONSTRAINT `name` FOREIGN KEY (`local_col`)
        # REFERENCES `remote` (`remote_col`)
        # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
        # ON DELETE CASCADE ON UPDATE RESTRICT
        #
        # unique constraints come back as KEYs
        kw = quotes.copy()
        kw['on'] = 'RESTRICT|CASCADE|SET NULL|NOACTION'
        self._re_constraint = _re_compile(
            r'  '
            r'CONSTRAINT +'
            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
            r'FOREIGN KEY +'
            r'\((?P<local>[^\)]+?)\) REFERENCES +'
            r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
            r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
            r'\((?P<foreign>[^\)]+?)\)'
            r'(?: +(?P<match>MATCH \w+))?'
            r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
            r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
            % kw
        )
        # PARTITION
        #
        # punt!
        self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
        # Table-level options (COLLATE, ENGINE, etc.)
        # Do the string options first, since they have quoted
        # strings we need to get rid of.
        for option in _options_of_type_string:
            self._add_option_string(option)
        for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
                       'AVG_ROW_LENGTH', 'CHARACTER SET',
                       'DEFAULT CHARSET', 'CHECKSUM',
                       'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
                       'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
                       'KEY_BLOCK_SIZE'):
            self._add_option_word(option)
        self._add_option_regex('UNION', r'\([^\)]+\)')
        self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
        self._add_option_regex(
            'RAID_TYPE',
            r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
    # "OPT = value", "OPT=value", or bare "OPT value" separators.
    _optional_equals = r'(?:\s*(?:=\s*)|\s+)'
    def _add_option_string(self, directive):
        """Register a parser for an option whose value is a quoted string."""
        regex = (r'(?P<directive>%s)%s'
                 r"'(?P<val>(?:[^']|'')*?)'(?!')" %
                 (re.escape(directive), self._optional_equals))
        self._pr_options.append(_pr_compile(
            regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
        ))
    def _add_option_word(self, directive):
        """Register a parser for an option whose value is a single word."""
        regex = (r'(?P<directive>%s)%s'
                 r'(?P<val>\w+)' %
                 (re.escape(directive), self._optional_equals))
        self._pr_options.append(_pr_compile(regex))
    def _add_option_regex(self, directive, regex):
        """Register a parser for an option with a custom value pattern."""
        regex = (r'(?P<directive>%s)%s'
                 r'(?P<val>%s)' %
                 (re.escape(directive), self._optional_equals, regex))
        self._pr_options.append(_pr_compile(regex))
# Table options whose values are quoted strings; these are parsed with
# _add_option_string rather than _add_option_word.
_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
                           'PASSWORD', 'CONNECTION')
class _DecodingRowProxy(object):
    """Return unicode-decoded values based on type inspection.

    Smooth over data type issues (esp. with alpha driver versions) and
    normalize strings as Unicode regardless of user-configured driver
    encoding settings.
    """

    # Some MySQL-python versions can return some columns as
    # sets.Set(['value']) (seriously) but thankfully that doesn't
    # seem to come up in DDL queries.

    # Map MySQL encoding names to their Python codec equivalents.
    _encoding_compat = {
        'koi8r': 'koi8_r',
        'koi8u': 'koi8_u',
        'utf16': 'utf-16-be',  # MySQL's utf16 is always bigendian
        'utf8mb4': 'utf8',  # real utf8
        'eucjpms': 'ujis',
    }

    def __init__(self, rowproxy, charset):
        """Wrap ``rowproxy``, decoding values with ``charset``.

        :param rowproxy: the underlying DBAPI/SQLAlchemy row.
        :param charset: MySQL charset name; translated to a Python
            codec via ``_encoding_compat``.
        """
        self.rowproxy = rowproxy
        self.charset = self._encoding_compat.get(charset, charset)

    def _decode(self, item):
        # Shared normalization for item and attribute access: flatten
        # array values to bytes, then decode byte strings when a
        # charset is configured.
        if isinstance(item, _array):
            item = item.tostring()
        if self.charset and isinstance(item, util.binary_type):
            return item.decode(self.charset)
        return item

    def __getitem__(self, index):
        """Return the decoded column value at ``index``."""
        return self._decode(self.rowproxy[index])

    def __getattr__(self, attr):
        """Return the decoded attribute value from the wrapped row."""
        return self._decode(getattr(self.rowproxy, attr))
def _pr_compile(regex, cleanup=None):
    """Pair a compiled regex with an optional post-match cleanup callable."""
    compiled = _re_compile(regex)
    return compiled, cleanup
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE)
| {
"content_hash": "711aa15b6ab2ca2ef59d75bf0c74d423",
"timestamp": "",
"source": "github",
"line_count": 3391,
"max_line_length": 94,
"avg_line_length": 35.720731347685046,
"alnum_prop": 0.5828331778517118,
"repo_name": "hsum/sqlalchemy",
"id": "2c78de2fc685f4245b55fe2aa5e24bc923eba33a",
"size": "121363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/dialects/mysql/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46062"
},
{
"name": "Python",
"bytes": "8792776"
}
],
"symlink_target": ""
} |
import os

from twilio.rest import Client

# Credentials are read from the environment rather than hard-coded;
# see http://twil.io/secure for setup instructions.
ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']

client = Client(ACCOUNT_SID, AUTH_TOKEN)

# Register a GCM device binding under the Notify service.
service = client.notify.v1.services(sid='ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
binding = service.bindings.create(
    endpoint='XXXXXXXXXXXXXXX',
    # We recommend using a GUID or other anonymized identifier for Identity
    identity='00000001',
    binding_type='gcm',
    address='gcm_device_token',
    tag=['premium', 'new user'],
)

print(binding.sid)
| {
"content_hash": "5d6cc7c984fee501e3b12bb12501bb91",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 33.72222222222222,
"alnum_prop": 0.7018121911037891,
"repo_name": "TwilioDevEd/api-snippets",
"id": "b1134beb272f72902fdc72fc6044f1baf79d58b7",
"size": "701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifications/register/create-binding-server/create-binding-server.6.x.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
} |
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class UserProfile(models.Model):
    """Per-user moderation notification preferences.

    One profile row exists per user; use :meth:`get_for_user` to fetch
    (or lazily create) the profile for a given user.
    """

    # Explicit on_delete matches the pre-Django-2.0 implicit default
    # (CASCADE) and keeps this field definition valid on Django >= 2.0,
    # where the argument is required.
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                on_delete=models.CASCADE)

    submitted_notifications = models.BooleanField(
        default=True,
        help_text=_("Receive notification when a page is submitted for moderation")
    )
    approved_notifications = models.BooleanField(
        default=True,
        help_text=_("Receive notification when your page edit is approved")
    )
    rejected_notifications = models.BooleanField(
        default=True,
        help_text=_("Receive notification when your page edit is rejected")
    )

    @classmethod
    def get_for_user(cls, user):
        """Return ``user``'s profile, creating it with defaults if absent."""
        return cls.objects.get_or_create(user=user)[0]

    def __str__(self):
        return self.user.username
| {
"content_hash": "5704685a47b78c5a4bbac0e1fdea7b9e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 87,
"avg_line_length": 31.870967741935484,
"alnum_prop": 0.6700404858299596,
"repo_name": "lojack/wagtail",
"id": "eb0aa0c9efbb34f2cc90d612471b9eeaf802bfe1",
"size": "988",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "wagtail/wagtailusers/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "127495"
},
{
"name": "D",
"bytes": "2012"
},
{
"name": "JavaScript",
"bytes": "41818"
},
{
"name": "Python",
"bytes": "831307"
},
{
"name": "Shell",
"bytes": "8483"
}
],
"symlink_target": ""
} |
import os
def get_top_directory():
    """Return the directory name of the installed ``asciimap`` package."""
    import asciimap
    package_path = asciimap.__path__[0]
    return os.path.basename(package_path)
def get_test_module():
    """Return the top package directory expressed as a dotted module path."""
    top = get_top_directory()
    return top.replace("/", ".")
| {
"content_hash": "5f7e5f5ad97aec834f3b75b1856bdc36",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 49,
"avg_line_length": 20,
"alnum_prop": 0.65,
"repo_name": "oubiwann/ascii-mapper",
"id": "89350a1061c243b6b8981a18bdea3ff8da431542",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asciimap/testing/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46996"
}
],
"symlink_target": ""
} |
import os
import unittest
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from systestlib import DutSystemTest
class TestApiMlag(DutSystemTest):
    """System tests for the pyeapi 'mlag' API against live devices.

    Each test resets the relevant MLAG/interface configuration on every
    device under test (DUT), applies a single getter or setter, and then
    verifies the resulting running-config block.  The exact CLI strings
    asserted here are the contract with EOS; do not alter them.
    """

    def test_get(self):
        # get() on a defaulted config returns all-None values.
        for dut in self.duts:
            dut.config(['no interface Port-Channel1-2000',
                        'default mlag configuration'])
            response = dut.api('mlag').get()
            config = dict(domain_id=None, local_interface=None,
                          peer_link=None, peer_address=None, shutdown=False)
            values = dict(config=config, interfaces=dict())
            self.assertEqual(values, response)

    def test_set_domain_id_with_value(self):
        for dut in self.duts:
            dut.config('default mlag configuration')
            api = dut.api('mlag')
            self.assertIn('no domain-id', api.get_block('mlag configuration'))
            result = dut.api('mlag').set_domain_id('test')
            self.assertTrue(result)
            self.assertIn('domain-id test', api.get_block('mlag configuration'))

    def test_set_domain_id_with_no_value(self):
        # Calling the setter with no value negates the option.
        for dut in self.duts:
            dut.config(['mlag configuration', 'domain-id test'])
            api = dut.api('mlag')
            self.assertIn('domain-id test', api.get_block('mlag configuration'))
            result = dut.api('mlag').set_domain_id()
            self.assertTrue(result)
            self.assertIn('no domain-id', api.get_block('mlag configuration'))

    def test_set_domain_id_with_default(self):
        for dut in self.duts:
            dut.config(['mlag configuration', 'domain-id test'])
            api = dut.api('mlag')
            self.assertIn('domain-id test', api.get_block('mlag configuration'))
            result = dut.api('mlag').set_domain_id(default=True)
            self.assertTrue(result)
            self.assertIn('no domain-id', api.get_block('mlag configuration'))

    def test_set_local_interface_with_value(self):
        for dut in self.duts:
            dut.config('default mlag configuration')
            api = dut.api('mlag')
            self.assertIn('no local-interface', api.get_block('mlag configuration'))
            result = dut.api('mlag').set_local_interface('Vlan1234')
            self.assertTrue(result)
            self.assertIn('local-interface Vlan1234', api.get_block('mlag configuration'))

    def test_set_local_interface_with_no_value(self):
        for dut in self.duts:
            dut.config(['interface Vlan1234', 'mlag configuration', 'local-interface Vlan1234'])
            api = dut.api('mlag')
            self.assertIn('local-interface Vlan1234', api.get_block('mlag configuration'))
            result = api.set_local_interface()
            self.assertTrue(result)
            self.assertIn('no local-interface', api.get_block('mlag configuration'))

    def test_set_local_interface_with_default(self):
        for dut in self.duts:
            dut.config(['interface Vlan1234', 'mlag configuration', 'local-interface Vlan1234'])
            api = dut.api('mlag')
            self.assertIn('local-interface Vlan1234', api.get_block('mlag configuration'))
            result = api.set_local_interface(default=True)
            self.assertTrue(result)
            self.assertIn('no local-interface', api.get_block('mlag configuration'))

    def test_set_peer_address_with_value(self):
        for dut in self.duts:
            dut.config('default mlag configuration')
            api = dut.api('mlag')
            self.assertIn('no peer-address', api.get_block('mlag configuration'))
            result = dut.api('mlag').set_peer_address('1.2.3.4')
            self.assertTrue(result)
            self.assertIn('peer-address 1.2.3.4', api.get_block('mlag configuration'))

    def test_set_peer_address_with_no_value(self):
        # An SVI with an address in the peer subnet must exist first.
        for dut in self.duts:
            dut.config(['interface Vlan1234', 'ip address 1.2.3.1/24',
                        'mlag configuration', 'peer-address 1.2.3.4'])
            api = dut.api('mlag')
            self.assertIn('peer-address 1.2.3.4', api.get_block('mlag configuration'))
            result = api.set_peer_address()
            self.assertTrue(result)
            self.assertIn('no peer-address', api.get_block('mlag configuration'))

    def test_set_peer_address_with_default(self):
        for dut in self.duts:
            dut.config(['interface Vlan1234', 'ip address 1.2.3.1/24',
                        'mlag configuration', 'peer-address 1.2.3.4'])
            api = dut.api('mlag')
            self.assertIn('peer-address 1.2.3.4', api.get_block('mlag configuration'))
            result = api.set_peer_address(default=True)
            self.assertTrue(result)
            self.assertIn('no peer-address', api.get_block('mlag configuration'))

    def test_set_peer_link_with_value(self):
        for dut in self.duts:
            dut.config('default mlag configuration')
            api = dut.api('mlag')
            self.assertIn('no peer-link', api.get_block('mlag configuration'))
            result = dut.api('mlag').set_peer_link('Ethernet1')
            self.assertTrue(result)
            self.assertIn('peer-link Ethernet1', api.get_block('mlag configuration'))

    def test_set_peer_link_with_no_value(self):
        for dut in self.duts:
            dut.config(['mlag configuration', 'peer-link Ethernet1'])
            api = dut.api('mlag')
            self.assertIn('peer-link Ethernet1', api.get_block('mlag configuration'))
            result = api.set_peer_link()
            self.assertTrue(result)
            self.assertIn('no peer-link', api.get_block('mlag configuration'))

    def test_set_peer_link_with_default(self):
        for dut in self.duts:
            dut.config(['mlag configuration', 'peer-link Ethernet1'])
            api = dut.api('mlag')
            self.assertIn('peer-link Ethernet1', api.get_block('mlag configuration'))
            result = api.set_peer_link(default=True)
            self.assertTrue(result)
            self.assertIn('no peer-link', api.get_block('mlag configuration'))

    def test_set_shutdown_with_true(self):
        for dut in self.duts:
            dut.config('default mlag configuration')
            api = dut.api('mlag')
            self.assertIn('no shutdown', api.get_block('mlag configuration'))
            result = api.set_shutdown(True)
            self.assertTrue(result)
            self.assertIn('shutdown', api.get_block('mlag configuration'))

    def test_set_shutdown_with_false(self):
        for dut in self.duts:
            dut.config(['mlag configuration', 'shutdown'])
            api = dut.api('mlag')
            self.assertIn('shutdown', api.get_block('mlag configuration'))
            result = api.set_shutdown(False)
            self.assertTrue(result)
            self.assertIn('no shutdown', api.get_block('mlag configuration'))

    def test_set_shutdown_with_no_value(self):
        for dut in self.duts:
            dut.config(['mlag configuration', 'shutdown'])
            api = dut.api('mlag')
            self.assertIn('shutdown', api.get_block('mlag configuration'))
            result = api.set_shutdown()
            self.assertTrue(result)
            self.assertIn('no shutdown', api.get_block('mlag configuration'))

    def test_set_shutdown_with_default(self):
        for dut in self.duts:
            dut.config(['mlag configuration', 'shutdown'])
            api = dut.api('mlag')
            self.assertIn('shutdown', api.get_block('mlag configuration'))
            result = api.set_shutdown(default=True)
            self.assertTrue(result)
            self.assertIn('no shutdown', api.get_block('mlag configuration'))

    def test_set_mlag_id_with_value(self):
        # set_mlag_id operates on an interface block, not 'mlag configuration'.
        for dut in self.duts:
            dut.config('no interface Port-Channel10')
            api = dut.api('mlag')
            self.assertIsNone(api.get_block('interface Port-Channel10'))
            result = api.set_mlag_id('Port-Channel10', '100')
            self.assertTrue(result)
            self.assertIn('mlag 100', api.get_block('interface Port-Channel10'))

    def test_set_mlag_id_with_no_value(self):
        for dut in self.duts:
            dut.config(['no interface Port-Channel10',
                        'interface Port-Channel10', 'mlag 100'])
            api = dut.api('mlag')
            self.assertIn('mlag 100', api.get_block('interface Port-Channel10'))
            result = api.set_mlag_id('Port-Channel10')
            self.assertTrue(result)
            self.assertIn('no mlag', api.get_block('interface Port-Channel10'))

    def test_set_mlag_id_with_default(self):
        for dut in self.duts:
            dut.config(['no interface Port-Channel10',
                        'interface Port-Channel10', 'mlag 100'])
            api = dut.api('mlag')
            self.assertIn('mlag 100', api.get_block('interface Port-Channel10'))
            result = api.set_mlag_id('Port-Channel10', default=True)
            self.assertTrue(result)
            self.assertIn('no mlag', api.get_block('interface Port-Channel10'))
# Allow running this system-test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "fdb06353e00a5c20c4068bead691c8d9",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 96,
"avg_line_length": 45.89949748743719,
"alnum_prop": 0.5943726735274798,
"repo_name": "SivagnanamCiena/pyeapi",
"id": "b77b65a8e840252e3731c9a42f904ed5c9f469cd",
"size": "10664",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "test/system/test_api_mlag.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1521"
},
{
"name": "Python",
"bytes": "363606"
}
],
"symlink_target": ""
} |
import datetime
import pytz
from unittest import TestCase
from mock import patch, Mock, call
from purchasing.jobs.job_base import JobBase, EmailJobBase
from purchasing_test.factories import JobStatusFactory
import logging

# Silence factory_boy's chatty DEBUG output during test runs.
logging.getLogger("factory").setLevel(logging.WARN)
class FakeJobBase(JobBase):
    """JobBase variant wired to test doubles instead of real models."""

    # Fresh registry so tests don't see jobs registered elsewhere.
    jobs = []

    @property
    def job_status_model(self):
        """Use the factory in place of the persistent job-status model."""
        return JobStatusFactory

    @property
    def start_time(self):
        """No scheduled start time; subclasses override for timing tests."""
        return None
@FakeJobBase.register
class FakeJob(FakeJobBase):
    """Concrete job registered on FakeJobBase for registration tests."""
class PastJob(FakeJobBase):
    """Job whose start time is one minute in the past (UTC-aware)."""

    @property
    def start_time(self):
        one_minute_ago = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)
        return one_minute_ago.replace(tzinfo=pytz.UTC)
class FutureJob(FakeJobBase):
    """Job whose start time is one minute in the future (UTC-aware)."""

    @property
    def start_time(self):
        one_minute_ahead = datetime.datetime.utcnow() + datetime.timedelta(minutes=1)
        return one_minute_ahead.replace(tzinfo=pytz.UTC)
class FakeEmailJob(EmailJobBase):
    """EmailJobBase variant whose status model is the test factory."""

    # Equivalent to a @property method returning JobStatusFactory.
    job_status_model = property(lambda self: JobStatusFactory)
class TestJobBase(TestCase):
    """Unit tests for JobBase registration and scheduling behavior."""

    def test_register_job(self):
        """Registering via the decorator adds the class to ``jobs``."""
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(len(FakeJobBase.jobs), 1)
        self.assertTrue(FakeJob in FakeJobBase.jobs)

    @patch('purchasing.jobs.job_base.get_or_create', return_value=[JobStatusFactory.build(), True])
    def test_schedule_timer_no_time(self, get_or_create):
        """A job with no start time schedules immediately."""
        FakeJob().schedule_job()
        self.assertTrue(get_or_create.called)

    @patch('purchasing.jobs.job_base.get_or_create', return_value=[JobStatusFactory.build(), True])
    def test_schedule_timer_past_job(self, get_or_create):
        """A job whose start time has already passed schedules."""
        PastJob().schedule_job()
        self.assertTrue(get_or_create.called)

    @patch('purchasing.jobs.job_base.get_or_create', return_value=[JobStatusFactory.build(), True])
    def test_schedule_timer_future_job(self, get_or_create):
        """A job whose start time is in the future does not schedule yet."""
        FutureJob().schedule_job()
        self.assertFalse(get_or_create.called)
class TestEmailJobBase(TestCase):
    """Unit tests for EmailJobBase.run_job status transitions."""

    def setUp(self):
        # Job record double whose ``update`` calls we inspect.
        job_mock = Mock()
        job_mock.update = Mock()
        self.job = job_mock
        # One notification that sends cleanly, one whose send() raises.
        notification_mock = Mock()
        notification_mock.send = Mock()
        notification_fail = Mock()
        notification_fail.send = Mock(side_effect=Exception('something went wrong!'))
        self.success_notification = notification_mock
        self.failure_notification = notification_fail

    def tearDown(self):
        self.job.reset_mock()

    def test_all_successful(self):
        """All notifications sending cleanly marks the job 'success'."""
        send_mock = Mock()
        send_mock.return_value = [self.success_notification, self.success_notification]
        FakeEmailJob.build_notifications = send_mock
        expected_updates = [call.update(status='started'), call.update(status='success')]
        FakeEmailJob().run_job(self.job)
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(self.job.mock_calls, expected_updates)

    def test_some_failures(self):
        """Any notification raising marks the job 'failed' with the message."""
        send_mock = Mock()
        send_mock.return_value = [self.success_notification, self.failure_notification]
        FakeEmailJob.build_notifications = send_mock
        expected_updates = [
            call.update(status='started'),
            call.update(status='failed', info='something went wrong!')
        ]
        FakeEmailJob().run_job(self.job)
        self.assertEqual(self.job.mock_calls, expected_updates)
| {
"content_hash": "7f3ceb7e999cc6af96c8bdf65ab595ed",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 100,
"avg_line_length": 30.83809523809524,
"alnum_prop": 0.6815935762816553,
"repo_name": "ajb/pittsburgh-purchasing-suite",
"id": "e7415bee26860efd2d655805b1ec0ca5a90d3c14",
"size": "3263",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "purchasing_test/unit/jobs/test_job_base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "22578"
},
{
"name": "HTML",
"bytes": "298440"
},
{
"name": "JavaScript",
"bytes": "14095"
},
{
"name": "Makefile",
"bytes": "199"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "521766"
},
{
"name": "Shell",
"bytes": "3018"
}
],
"symlink_target": ""
} |
import os
# try/except added for compatibility with python < 3.8
try:
from unittest import mock
from unittest.mock import AsyncMock # pragma: NO COVER
except ImportError: # pragma: NO COVER
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from proto.marshal.rules import wrappers
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1.services.metadata_service import (
MetadataServiceAsyncClient,
)
from google.cloud.aiplatform_v1.services.metadata_service import MetadataServiceClient
from google.cloud.aiplatform_v1.services.metadata_service import pagers
from google.cloud.aiplatform_v1.services.metadata_service import transports
from google.cloud.aiplatform_v1.types import artifact
from google.cloud.aiplatform_v1.types import artifact as gca_artifact
from google.cloud.aiplatform_v1.types import context
from google.cloud.aiplatform_v1.types import context as gca_context
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import event
from google.cloud.aiplatform_v1.types import execution
from google.cloud.aiplatform_v1.types import execution as gca_execution
from google.cloud.aiplatform_v1.types import lineage_subgraph
from google.cloud.aiplatform_v1.types import metadata_schema
from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
from google.cloud.aiplatform_v1.types import metadata_service
from google.cloud.aiplatform_v1.types import metadata_store
from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.location import locations_pb2
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import options_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Return a dummy (cert, key) byte pair for mTLS test plumbing."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a distinct test endpoint when the client defaults to localhost.

    This lets the mTLS-endpoint tests observe a different value than the
    client's regular default endpoint.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps API hosts to their mTLS variants."""
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        # Non-googleapis hosts pass through unchanged.
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in cases:
        assert MetadataServiceClient._get_default_mtls_endpoint(endpoint) == expected
@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (MetadataServiceClient, "grpc"),
        (MetadataServiceAsyncClient, "grpc_asyncio"),
    ],
)
def test_metadata_service_client_from_service_account_info(
    client_class, transport_name
):
    """from_service_account_info wires credentials and the default host."""
    fake_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = fake_creds
        client = client_class.from_service_account_info(
            {"valid": True}, transport=transport_name
        )
        assert isinstance(client, client_class)
        assert client.transport._credentials == fake_creds

        assert client.transport._host == ("aiplatform.googleapis.com:443")
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.MetadataServiceGrpcTransport, "grpc"),
        (transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_metadata_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """with_always_use_jwt_access is applied only when the flag is True."""
    for always_use_jwt in (True, False):
        with mock.patch.object(
            service_account.Credentials, "with_always_use_jwt_access", create=True
        ) as use_jwt:
            creds = service_account.Credentials(None, None, None)
            transport = transport_class(
                credentials=creds, always_use_jwt_access=always_use_jwt
            )
            if always_use_jwt:
                use_jwt.assert_called_once_with(True)
            else:
                use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (MetadataServiceClient, "grpc"),
        (MetadataServiceAsyncClient, "grpc_asyncio"),
    ],
)
def test_metadata_service_client_from_service_account_file(
    client_class, transport_name
):
    """Both file-based constructors wire credentials and the default host."""
    fake_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = fake_creds
        # from_service_account_json is an alias of from_service_account_file.
        for constructor in ("from_service_account_file", "from_service_account_json"):
            client = getattr(client_class, constructor)(
                "dummy/file/path.json", transport=transport_name
            )
            assert client.transport._credentials == fake_creds
            assert isinstance(client, client_class)

        assert client.transport._host == ("aiplatform.googleapis.com:443")
def test_metadata_service_client_get_transport_class():
    """get_transport_class resolves both the default and named transports."""
    default_transport = MetadataServiceClient.get_transport_class()
    assert default_transport in [
        transports.MetadataServiceGrpcTransport,
    ]

    named_transport = MetadataServiceClient.get_transport_class("grpc")
    assert named_transport == transports.MetadataServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"),
        (
            MetadataServiceAsyncClient,
            transports.MetadataServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    MetadataServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MetadataServiceClient),
)
@mock.patch.object(
    MetadataServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MetadataServiceAsyncClient),
)
def test_metadata_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Verify how ClientOptions and mTLS env vars drive transport creation.

    Each section patches the transport constructor and asserts the exact
    keyword arguments the client passes for a given combination of
    ClientOptions and GOOGLE_API_USE_MTLS_ENDPOINT /
    GOOGLE_API_USE_CLIENT_CERTIFICATE settings.
    """
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(MetadataServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(MetadataServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # Check the case api_endpoint is provided
    options = client_options.ClientOptions(
        api_audience="https://language.googleapis.com"
    )
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience="https://language.googleapis.com",
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            MetadataServiceClient,
            transports.MetadataServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            MetadataServiceAsyncClient,
            transports.MetadataServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            MetadataServiceClient,
            transports.MetadataServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            MetadataServiceAsyncClient,
            transports.MetadataServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    MetadataServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MetadataServiceClient),
)
@mock.patch.object(
    MetadataServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MetadataServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_metadata_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """Verify endpoint/cert autoswitching under GOOGLE_API_USE_MTLS_ENDPOINT=auto.

    Covers three client-cert scenarios (explicit options, ADC-provided,
    and none) crossed with GOOGLE_API_USE_CLIENT_CERTIFICATE true/false.
    """
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                        api_audience=None,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                    api_audience=None,
                )
@pytest.mark.parametrize(
    "client_class", [MetadataServiceClient, MetadataServiceAsyncClient]
)
@mock.patch.object(
    MetadataServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MetadataServiceClient),
)
@mock.patch.object(
    MetadataServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MetadataServiceAsyncClient),
)
def test_metadata_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """Verify get_mtls_endpoint_and_cert_source honors GOOGLE_API_USE_CLIENT_CERTIFICATE
    and GOOGLE_API_USE_MTLS_ENDPOINT environment variables."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    # An explicit api_endpoint in options wins over any mTLS default.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    # The provided cert source must be ignored entirely.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"),
        (
            MetadataServiceAsyncClient,
            transports.MetadataServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_metadata_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client_options are forwarded to the transport."""
    scoped_options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=scoped_options, transport=transport_name)
        # The transport must receive the scopes verbatim; everything else
        # stays at its default.
        expected_kwargs = dict(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
        patched.assert_called_once_with(**expected_kwargs)
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            MetadataServiceClient,
            transports.MetadataServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            MetadataServiceAsyncClient,
            transports.MetadataServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_metadata_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials file named in client_options is passed to the transport."""
    opts = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=opts, transport=transport_name)
        # Only credentials_file deviates from transport defaults.
        expected_kwargs = dict(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
        patched.assert_called_once_with(**expected_kwargs)
def test_metadata_service_client_client_options_from_dict():
    """A plain dict is accepted as client_options and its api_endpoint is used."""
    transport_init_path = (
        "google.cloud.aiplatform_v1.services.metadata_service.transports."
        "MetadataServiceGrpcTransport.__init__"
    )
    with mock.patch(transport_init_path) as grpc_transport:
        grpc_transport.return_value = None
        MetadataServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
        # The dict's api_endpoint becomes the transport host; all other
        # transport arguments keep their defaults.
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            MetadataServiceClient,
            transports.MetadataServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            MetadataServiceAsyncClient,
            transports.MetadataServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_metadata_service_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file are used when the gRPC channel is created."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # test that the credentials from file are saved and used as the credentials.
    # Patch the file loader, ADC, and channel factory together so the real
    # transport constructor runs but no network credentials are needed.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        # The channel must be built with the file credentials, not ADC's.
        create_channel.assert_called_with(
            "aiplatform.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="aiplatform.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.CreateMetadataStoreRequest,
        dict,
    ],
)
def test_create_metadata_store(request_type, transport: str = "grpc"):
    """create_metadata_store sends the request and wraps the LRO in a future."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked out, so an
    # empty request is sufficient.
    request = request_type()
    # Fake the gRPC stub for the method under test.
    with mock.patch.object(
        type(client.transport.create_metadata_store), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_metadata_store(request)
        # The stub was invoked exactly once with the expected request type.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == metadata_service.CreateMetadataStoreRequest()
    # The client wraps the raw Operation in a future.
    assert isinstance(response, future.Future)
def test_create_metadata_store_empty_call():
    """Coverage failsafe: calling with no request and no flattened args works."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Fake the gRPC stub for the method under test.
    with mock.patch.object(
        type(client.transport.create_metadata_store), "__call__"
    ) as rpc:
        client.create_metadata_store()
        rpc.assert_called()
        # A default request object must have been synthesized.
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == metadata_service.CreateMetadataStoreRequest()
@pytest.mark.asyncio
async def test_create_metadata_store_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.CreateMetadataStoreRequest,
):
    """create_metadata_store (async client) sends the request and returns a future."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_metadata_store), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_metadata_store(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.CreateMetadataStoreRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_metadata_store_async_from_dict():
    # Re-run the async test with a plain dict as the request type.
    await test_create_metadata_store_async(request_type=dict)
def test_create_metadata_store_field_headers():
    """URI-bound request fields are echoed in the x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is part of the HTTP/1.1 URI.
    request = metadata_service.CreateMetadataStoreRequest()
    request.parent = "parent_value"
    # Fake the gRPC stub for the method under test.
    with mock.patch.object(
        type(client.transport.create_metadata_store), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.create_metadata_store(request)
        # The stub received the request unchanged...
        assert len(rpc.mock_calls) == 1
        _, args, kw = rpc.mock_calls[0]
        assert args[0] == request
        # ...together with the routing header derived from `parent`.
        expected_header = (
            "x-goog-request-params",
            "parent=parent_value",
        )
        assert expected_header in kw["metadata"]
@pytest.mark.asyncio
async def test_create_metadata_store_field_headers_async():
    """URI-bound request fields are echoed in the routing header (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.CreateMetadataStoreRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_metadata_store), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_metadata_store(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in kw["metadata"]
def test_create_metadata_store_flattened():
    """Flattened keyword arguments are folded into the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Fake the gRPC stub for the method under test.
    with mock.patch.object(
        type(client.transport.create_metadata_store), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke with a truthy value for every flattened field.
        client.create_metadata_store(
            parent="parent_value",
            metadata_store=gca_metadata_store.MetadataStore(name="name_value"),
            metadata_store_id="metadata_store_id_value",
        )
        # Each keyword must appear on the request object the stub received.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        sent = args[0]
        assert sent.parent == "parent_value"
        assert sent.metadata_store == gca_metadata_store.MetadataStore(
            name="name_value"
        )
        assert sent.metadata_store_id == "metadata_store_id_value"
def test_create_metadata_store_flattened_error():
    """Supplying both a request object and flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client.create_metadata_store(
            metadata_service.CreateMetadataStoreRequest(),
            parent="parent_value",
            metadata_store=gca_metadata_store.MetadataStore(name="name_value"),
            metadata_store_id="metadata_store_id_value",
        )
@pytest.mark.asyncio
async def test_create_metadata_store_flattened_async():
    """Flattened keyword arguments are folded into the request (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_metadata_store), "__call__"
    ) as call:
        # Designate an appropriate return value for the call; the async
        # stub must return an awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_metadata_store(
            parent="parent_value",
            metadata_store=gca_metadata_store.MetadataStore(name="name_value"),
            metadata_store_id="metadata_store_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].metadata_store
        mock_val = gca_metadata_store.MetadataStore(name="name_value")
        assert arg == mock_val
        arg = args[0].metadata_store_id
        mock_val = "metadata_store_id_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_metadata_store_flattened_error_async():
    """Request object plus flattened fields raises ValueError (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.create_metadata_store(
            metadata_service.CreateMetadataStoreRequest(),
            parent="parent_value",
            metadata_store=gca_metadata_store.MetadataStore(name="name_value"),
            metadata_store_id="metadata_store_id_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.GetMetadataStoreRequest,
        dict,
    ],
)
def test_get_metadata_store(request_type, transport: str = "grpc"):
    """get_metadata_store sends the request and returns the MetadataStore."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_metadata_store), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = metadata_store.MetadataStore(
            name="name_value",
            description="description_value",
        )
        response = client.get_metadata_store(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.GetMetadataStoreRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, metadata_store.MetadataStore)
    assert response.name == "name_value"
    assert response.description == "description_value"
def test_get_metadata_store_empty_call():
    """Coverage failsafe: calling with no request and no flattened args works."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Fake the gRPC stub for the method under test.
    with mock.patch.object(
        type(client.transport.get_metadata_store), "__call__"
    ) as rpc:
        client.get_metadata_store()
        rpc.assert_called()
        # A default request object must have been synthesized.
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == metadata_service.GetMetadataStoreRequest()
@pytest.mark.asyncio
async def test_get_metadata_store_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.GetMetadataStoreRequest,
):
    """get_metadata_store (async client) returns the MetadataStore."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_metadata_store), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_store.MetadataStore(
                name="name_value",
                description="description_value",
            )
        )
        response = await client.get_metadata_store(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.GetMetadataStoreRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, metadata_store.MetadataStore)
    assert response.name == "name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_metadata_store_async_from_dict():
    # Re-run the async test with a plain dict as the request type.
    await test_get_metadata_store_async(request_type=dict)
def test_get_metadata_store_field_headers():
    """URI-bound request fields are echoed in the x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.GetMetadataStoreRequest()
    request.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_metadata_store), "__call__"
    ) as call:
        call.return_value = metadata_store.MetadataStore()
        client.get_metadata_store(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_metadata_store_field_headers_async():
    """URI-bound request fields are echoed in the routing header (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.GetMetadataStoreRequest()
    request.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_metadata_store), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_store.MetadataStore()
        )
        await client.get_metadata_store(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in kw["metadata"]
def test_get_metadata_store_flattened():
    """Flattened keyword arguments are folded into the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Fake the gRPC stub for the method under test.
    with mock.patch.object(
        type(client.transport.get_metadata_store), "__call__"
    ) as rpc:
        rpc.return_value = metadata_store.MetadataStore()
        # Invoke with a truthy value for the flattened field.
        client.get_metadata_store(
            name="name_value",
        )
        # The keyword must appear on the request object the stub received.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0].name == "name_value"
def test_get_metadata_store_flattened_error():
    """Supplying both a request object and flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client.get_metadata_store(
            metadata_service.GetMetadataStoreRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_get_metadata_store_flattened_async():
    """Flattened keyword arguments are folded into the request (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_metadata_store), "__call__"
    ) as call:
        # Designate an appropriate return value for the call; the async
        # stub must return an awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_store.MetadataStore()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_metadata_store(
            name="name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_metadata_store_flattened_error_async():
    """Request object plus flattened fields raises ValueError (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_metadata_store(
            metadata_service.GetMetadataStoreRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.ListMetadataStoresRequest,
        dict,
    ],
)
def test_list_metadata_stores(request_type, transport: str = "grpc"):
    """list_metadata_stores sends the request and returns a pager."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_stores), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = metadata_service.ListMetadataStoresResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_metadata_stores(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.ListMetadataStoresRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListMetadataStoresPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_metadata_stores_empty_call():
    """Coverage failsafe: calling with no request and no flattened args works."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_stores), "__call__"
    ) as call:
        client.list_metadata_stores()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.ListMetadataStoresRequest()
@pytest.mark.asyncio
async def test_list_metadata_stores_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.ListMetadataStoresRequest,
):
    """list_metadata_stores (async client) returns an async pager."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_stores), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListMetadataStoresResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_metadata_stores(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.ListMetadataStoresRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListMetadataStoresAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_metadata_stores_async_from_dict():
    # Re-run the async test with a plain dict as the request type.
    await test_list_metadata_stores_async(request_type=dict)
def test_list_metadata_stores_field_headers():
    """URI-bound request fields are echoed in the x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.ListMetadataStoresRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_stores), "__call__"
    ) as call:
        call.return_value = metadata_service.ListMetadataStoresResponse()
        client.list_metadata_stores(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_metadata_stores_field_headers_async():
    """URI-bound request fields are echoed in the routing header (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.ListMetadataStoresRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_stores), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListMetadataStoresResponse()
        )
        await client.list_metadata_stores(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in kw["metadata"]
def test_list_metadata_stores_flattened():
    """Flattened keyword arguments are folded into the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_stores), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = metadata_service.ListMetadataStoresResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_metadata_stores(
            parent="parent_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_metadata_stores_flattened_error():
    """Supplying both a request object and flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_metadata_stores(
            metadata_service.ListMetadataStoresRequest(),
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_metadata_stores_flattened_async():
    """Flattened keyword arguments are folded into the request (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_stores), "__call__"
    ) as call:
        # Designate an appropriate return value for the call; the async
        # stub must return an awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListMetadataStoresResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_metadata_stores(
            parent="parent_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_metadata_stores_flattened_error_async():
    """Request object plus flattened fields raises ValueError (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_metadata_stores(
            metadata_service.ListMetadataStoresRequest(),
            parent="parent_value",
        )
def test_list_metadata_stores_pager(transport_name: str = "grpc"):
    """The sync pager iterates across all pages and carries routing metadata."""
    client = MetadataServiceClient(
        # Instantiate the credentials; passing the bare class (as the
        # generated code did) only works by accident.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_stores), "__call__"
    ) as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 items,
        # then RuntimeError to guard against over-fetching.
        call.side_effect = (
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[],
                next_page_token="def",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                ],
            ),
            RuntimeError,
        )
        # The pager stores the routing header derived from the (empty) parent.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_metadata_stores(request={})
        assert pager._metadata == metadata
        # Iterating the pager flattens all pages into one result stream.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, metadata_store.MetadataStore) for i in results)
def test_list_metadata_stores_pages(transport_name: str = "grpc"):
    """Each raw page of the sync pager exposes its next-page token.

    Fix: instantiate ``AnonymousCredentials()`` instead of passing the
    class object, matching every other test in this module.
    """
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_stores), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[],
                next_page_token="def",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_metadata_stores(request={}).pages)
        # Tokens surface page by page; the final page has an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_metadata_stores_async_pager():
    """Async pager iterates every item across all mocked pages.

    Fix: instantiate ``AnonymousCredentials()`` instead of passing the
    class object, matching every other test in this module.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_stores),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 stores).
        call.side_effect = (
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[],
                next_page_token="def",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_metadata_stores(
            request={},
        )
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:  # pragma: no branch
            responses.append(response)
        # All 6 stores across the four pages are yielded.
        assert len(responses) == 6
        assert all(isinstance(i, metadata_store.MetadataStore) for i in responses)
@pytest.mark.asyncio
async def test_list_metadata_stores_async_pages():
    """Async pages expose their next-page tokens in order.

    Fix: instantiate ``AnonymousCredentials()`` instead of passing the
    class object, matching every other test in this module.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_stores),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[],
                next_page_token="def",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListMetadataStoresResponse(
                metadata_stores=[
                    metadata_store.MetadataStore(),
                    metadata_store.MetadataStore(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (
            await client.list_metadata_stores(request={})
        ).pages:  # pragma: no branch
            pages.append(page_)
        # Tokens surface page by page; the final page has an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [metadata_service.DeleteMetadataStoreRequest, dict]
)
def test_delete_metadata_store(request_type, transport: str = "grpc"):
    """The RPC sends the expected request and returns an LRO future."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.delete_metadata_store), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_metadata_store(request)
        # Exactly one RPC was made, carrying the default request message.
        assert len(stub.mock_calls) == 1
        sent = stub.mock_calls[0].args[0]
        assert sent == metadata_service.DeleteMetadataStoreRequest()
    # The surface wraps the operation in a long-running-operation future.
    assert isinstance(response, future.Future)
def test_delete_metadata_store_empty_call():
    """A call with no request and no flattened fields sends the default request."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Patch the transport stub and invoke the method with no arguments.
    with mock.patch.object(
        type(client.transport.delete_metadata_store), "__call__"
    ) as stub:
        client.delete_metadata_store()
        stub.assert_called()
        # An empty invocation must serialize the default request type.
        sent = stub.mock_calls[0].args[0]
        assert sent == metadata_service.DeleteMetadataStoreRequest()
@pytest.mark.asyncio
async def test_delete_metadata_store_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.DeleteMetadataStoreRequest,
):
    """Async variant: the RPC is issued and yields an LRO future."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional and the API is mocked.
    request = request_type()
    with mock.patch.object(
        type(client.transport.delete_metadata_store), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_metadata_store(request)
        # The stub was invoked with the default request message.
        assert len(stub.mock_calls)
        sent = stub.mock_calls[0].args[0]
        assert sent == metadata_service.DeleteMetadataStoreRequest()
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_metadata_store_async_from_dict():
    """Exercise the async path with a plain dict in place of a request object."""
    await test_delete_metadata_store_async(request_type=dict)
def test_delete_metadata_store_field_headers():
    """The request's resource name is routed as an ``x-goog-request-params`` header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so a routing header must be emitted.
    request = metadata_service.DeleteMetadataStoreRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.delete_metadata_store), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_metadata_store(request)
        # Exactly one RPC, carrying the original request object.
        assert len(stub.mock_calls) == 1
        recorded = stub.mock_calls[0]
        assert recorded.args[0] == request
        # The routing header must name the resource.
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in recorded.kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_metadata_store_field_headers_async():
    """Async: the resource name is routed as an ``x-goog-request-params`` header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so a routing header must be emitted.
    request = metadata_service.DeleteMetadataStoreRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.delete_metadata_store), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_metadata_store(request)
        # The stub received the original request object.
        assert len(stub.mock_calls)
        recorded = stub.mock_calls[0]
        assert recorded.args[0] == request
        # The routing header must name the resource.
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in recorded.kwargs["metadata"]
def test_delete_metadata_store_flattened():
    """Flattened kwargs are copied onto the assembled request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.delete_metadata_store), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke with a truthy value for each flattened field.
        client.delete_metadata_store(
            name="name_value",
        )
        # The assembled request carries the flattened value.
        assert len(stub.mock_calls) == 1
        sent = stub.mock_calls[0].args[0]
        assert sent.name == "name_value"
def test_delete_metadata_store_flattened_error():
    """A request object combined with flattened fields must raise."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request object and flattened kwargs is rejected.
    with pytest.raises(ValueError):
        client.delete_metadata_store(
            metadata_service.DeleteMetadataStoreRequest(), name="name_value"
        )
@pytest.mark.asyncio
async def test_delete_metadata_store_flattened_async():
    """Async: flattened kwargs are copied onto the assembled request.

    Fix: removed a dead ``call.return_value`` assignment that was
    immediately overwritten by the ``FakeUnaryUnaryCall`` wrapper.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_metadata_store), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_metadata_store(
            name="name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_metadata_store_flattened_error_async():
    """Async: a request object combined with flattened fields must raise."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request object and flattened kwargs is rejected.
    with pytest.raises(ValueError):
        await client.delete_metadata_store(
            metadata_service.DeleteMetadataStoreRequest(), name="name_value"
        )
@pytest.mark.parametrize(
    "request_type", [metadata_service.CreateArtifactRequest, dict]
)
def test_create_artifact(request_type, transport: str = "grpc"):
    """The RPC sends the expected request and surfaces every Artifact field."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.create_artifact), "__call__") as stub:
        stub.return_value = gca_artifact.Artifact(
            name="name_value",
            display_name="display_name_value",
            uri="uri_value",
            etag="etag_value",
            state=gca_artifact.Artifact.State.PENDING,
            schema_title="schema_title_value",
            schema_version="schema_version_value",
            description="description_value",
        )
        response = client.create_artifact(request)
        # Exactly one RPC was made, carrying the default request message.
        assert len(stub.mock_calls) == 1
        assert stub.mock_calls[0].args[0] == metadata_service.CreateArtifactRequest()
    # Every field of the mocked Artifact must round-trip to the response.
    assert isinstance(response, gca_artifact.Artifact)
    expected_fields = {
        "name": "name_value",
        "display_name": "display_name_value",
        "uri": "uri_value",
        "etag": "etag_value",
        "state": gca_artifact.Artifact.State.PENDING,
        "schema_title": "schema_title_value",
        "schema_version": "schema_version_value",
        "description": "description_value",
    }
    for field, value in expected_fields.items():
        assert getattr(response, field) == value
def test_create_artifact_empty_call():
    """A call with no request and no flattened fields sends the default request."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Patch the transport stub and invoke the method with no arguments.
    with mock.patch.object(type(client.transport.create_artifact), "__call__") as stub:
        client.create_artifact()
        stub.assert_called()
        # An empty invocation must serialize the default request type.
        sent = stub.mock_calls[0].args[0]
        assert sent == metadata_service.CreateArtifactRequest()
@pytest.mark.asyncio
async def test_create_artifact_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.CreateArtifactRequest
):
    """Async variant: the RPC is issued and every Artifact field round-trips."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional and the API is mocked.
    request = request_type()
    with mock.patch.object(type(client.transport.create_artifact), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_artifact.Artifact(
                name="name_value",
                display_name="display_name_value",
                uri="uri_value",
                etag="etag_value",
                state=gca_artifact.Artifact.State.PENDING,
                schema_title="schema_title_value",
                schema_version="schema_version_value",
                description="description_value",
            )
        )
        response = await client.create_artifact(request)
        # The stub was invoked with the default request message.
        assert len(stub.mock_calls)
        assert stub.mock_calls[0].args[0] == metadata_service.CreateArtifactRequest()
    # Every field of the mocked Artifact must round-trip to the response.
    assert isinstance(response, gca_artifact.Artifact)
    expected_fields = {
        "name": "name_value",
        "display_name": "display_name_value",
        "uri": "uri_value",
        "etag": "etag_value",
        "state": gca_artifact.Artifact.State.PENDING,
        "schema_title": "schema_title_value",
        "schema_version": "schema_version_value",
        "description": "description_value",
    }
    for field, value in expected_fields.items():
        assert getattr(response, field) == value
@pytest.mark.asyncio
async def test_create_artifact_async_from_dict():
    """Exercise the async path with a plain dict in place of a request object."""
    await test_create_artifact_async(request_type=dict)
def test_create_artifact_field_headers():
    """The request's parent path is routed as an ``x-goog-request-params`` header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so a routing header must be emitted.
    request = metadata_service.CreateArtifactRequest()
    request.parent = "parent_value"
    with mock.patch.object(type(client.transport.create_artifact), "__call__") as stub:
        stub.return_value = gca_artifact.Artifact()
        client.create_artifact(request)
        # Exactly one RPC, carrying the original request object.
        assert len(stub.mock_calls) == 1
        recorded = stub.mock_calls[0]
        assert recorded.args[0] == request
        # The routing header must name the parent resource.
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in recorded.kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_artifact_field_headers_async():
    """Async: the parent path is routed as an ``x-goog-request-params`` header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so a routing header must be emitted.
    request = metadata_service.CreateArtifactRequest()
    request.parent = "parent_value"
    with mock.patch.object(type(client.transport.create_artifact), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_artifact.Artifact()
        )
        await client.create_artifact(request)
        # The stub received the original request object.
        assert len(stub.mock_calls)
        recorded = stub.mock_calls[0]
        assert recorded.args[0] == request
        # The routing header must name the parent resource.
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in recorded.kwargs["metadata"]
def test_create_artifact_flattened():
    """Each flattened kwarg is copied onto the assembled request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.create_artifact), "__call__") as stub:
        stub.return_value = gca_artifact.Artifact()
        # Invoke with a truthy value for each flattened field.
        client.create_artifact(
            parent="parent_value",
            artifact=gca_artifact.Artifact(name="name_value"),
            artifact_id="artifact_id_value",
        )
        # The assembled request carries each flattened value.
        assert len(stub.mock_calls) == 1
        sent = stub.mock_calls[0].args[0]
        assert sent.parent == "parent_value"
        assert sent.artifact == gca_artifact.Artifact(name="name_value")
        assert sent.artifact_id == "artifact_id_value"
def test_create_artifact_flattened_error():
    """A request object combined with flattened fields must raise."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request object and flattened kwargs is rejected.
    with pytest.raises(ValueError):
        client.create_artifact(
            metadata_service.CreateArtifactRequest(),
            parent="parent_value",
            artifact=gca_artifact.Artifact(name="name_value"),
            artifact_id="artifact_id_value",
        )
@pytest.mark.asyncio
async def test_create_artifact_flattened_async():
    """Async: flattened kwargs are copied onto the assembled request.

    Fix: removed a dead ``call.return_value`` assignment that was
    immediately overwritten by the ``FakeUnaryUnaryCall`` wrapper.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_artifact.Artifact()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_artifact(
            parent="parent_value",
            artifact=gca_artifact.Artifact(name="name_value"),
            artifact_id="artifact_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].artifact
        mock_val = gca_artifact.Artifact(name="name_value")
        assert arg == mock_val
        arg = args[0].artifact_id
        mock_val = "artifact_id_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_artifact_flattened_error_async():
    """Async: a request object combined with flattened fields must raise."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request object and flattened kwargs is rejected.
    with pytest.raises(ValueError):
        await client.create_artifact(
            metadata_service.CreateArtifactRequest(),
            parent="parent_value",
            artifact=gca_artifact.Artifact(name="name_value"),
            artifact_id="artifact_id_value",
        )
@pytest.mark.parametrize(
    "request_type", [metadata_service.GetArtifactRequest, dict]
)
def test_get_artifact(request_type, transport: str = "grpc"):
    """The RPC sends the expected request and surfaces every Artifact field."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.get_artifact), "__call__") as stub:
        stub.return_value = artifact.Artifact(
            name="name_value",
            display_name="display_name_value",
            uri="uri_value",
            etag="etag_value",
            state=artifact.Artifact.State.PENDING,
            schema_title="schema_title_value",
            schema_version="schema_version_value",
            description="description_value",
        )
        response = client.get_artifact(request)
        # Exactly one RPC was made, carrying the default request message.
        assert len(stub.mock_calls) == 1
        assert stub.mock_calls[0].args[0] == metadata_service.GetArtifactRequest()
    # Every field of the mocked Artifact must round-trip to the response.
    assert isinstance(response, artifact.Artifact)
    expected_fields = {
        "name": "name_value",
        "display_name": "display_name_value",
        "uri": "uri_value",
        "etag": "etag_value",
        "state": artifact.Artifact.State.PENDING,
        "schema_title": "schema_title_value",
        "schema_version": "schema_version_value",
        "description": "description_value",
    }
    for field, value in expected_fields.items():
        assert getattr(response, field) == value
def test_get_artifact_empty_call():
    """A call with no request and no flattened fields sends the default request."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Patch the transport stub and invoke the method with no arguments.
    with mock.patch.object(type(client.transport.get_artifact), "__call__") as stub:
        client.get_artifact()
        stub.assert_called()
        # An empty invocation must serialize the default request type.
        sent = stub.mock_calls[0].args[0]
        assert sent == metadata_service.GetArtifactRequest()
@pytest.mark.asyncio
async def test_get_artifact_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.GetArtifactRequest
):
    """Async variant: the RPC is issued and every Artifact field round-trips."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional and the API is mocked.
    request = request_type()
    with mock.patch.object(type(client.transport.get_artifact), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            artifact.Artifact(
                name="name_value",
                display_name="display_name_value",
                uri="uri_value",
                etag="etag_value",
                state=artifact.Artifact.State.PENDING,
                schema_title="schema_title_value",
                schema_version="schema_version_value",
                description="description_value",
            )
        )
        response = await client.get_artifact(request)
        # The stub was invoked with the default request message.
        assert len(stub.mock_calls)
        assert stub.mock_calls[0].args[0] == metadata_service.GetArtifactRequest()
    # Every field of the mocked Artifact must round-trip to the response.
    assert isinstance(response, artifact.Artifact)
    expected_fields = {
        "name": "name_value",
        "display_name": "display_name_value",
        "uri": "uri_value",
        "etag": "etag_value",
        "state": artifact.Artifact.State.PENDING,
        "schema_title": "schema_title_value",
        "schema_version": "schema_version_value",
        "description": "description_value",
    }
    for field, value in expected_fields.items():
        assert getattr(response, field) == value
@pytest.mark.asyncio
async def test_get_artifact_async_from_dict():
    """Exercise the async path with a plain dict in place of a request object."""
    await test_get_artifact_async(request_type=dict)
def test_get_artifact_field_headers():
    """The request's resource name is routed as an ``x-goog-request-params`` header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so a routing header must be emitted.
    request = metadata_service.GetArtifactRequest()
    request.name = "name_value"
    with mock.patch.object(type(client.transport.get_artifact), "__call__") as stub:
        stub.return_value = artifact.Artifact()
        client.get_artifact(request)
        # Exactly one RPC, carrying the original request object.
        assert len(stub.mock_calls) == 1
        recorded = stub.mock_calls[0]
        assert recorded.args[0] == request
        # The routing header must name the resource.
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in recorded.kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_artifact_field_headers_async():
    """Async: the resource name is routed as an ``x-goog-request-params`` header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so a routing header must be emitted.
    request = metadata_service.GetArtifactRequest()
    request.name = "name_value"
    with mock.patch.object(type(client.transport.get_artifact), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact())
        await client.get_artifact(request)
        # The stub received the original request object.
        assert len(stub.mock_calls)
        recorded = stub.mock_calls[0]
        assert recorded.args[0] == request
        # The routing header must name the resource.
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in recorded.kwargs["metadata"]
def test_get_artifact_flattened():
    """Flattened kwargs are copied onto the assembled request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.get_artifact), "__call__") as stub:
        stub.return_value = artifact.Artifact()
        # Invoke with a truthy value for each flattened field.
        client.get_artifact(
            name="name_value",
        )
        # The assembled request carries the flattened value.
        assert len(stub.mock_calls) == 1
        sent = stub.mock_calls[0].args[0]
        assert sent.name == "name_value"
def test_get_artifact_flattened_error():
    """A request object combined with flattened fields must raise."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request object and flattened kwargs is rejected.
    with pytest.raises(ValueError):
        client.get_artifact(
            metadata_service.GetArtifactRequest(), name="name_value"
        )
@pytest.mark.asyncio
async def test_get_artifact_flattened_async():
    """Async: flattened kwargs are copied onto the assembled request.

    Fix: removed a dead ``call.return_value`` assignment that was
    immediately overwritten by the ``FakeUnaryUnaryCall`` wrapper.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_artifact(
            name="name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_artifact_flattened_error_async():
    """Async: a request object combined with flattened fields must raise."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request object and flattened kwargs is rejected.
    with pytest.raises(ValueError):
        await client.get_artifact(
            metadata_service.GetArtifactRequest(), name="name_value"
        )
@pytest.mark.parametrize(
    "request_type", [metadata_service.ListArtifactsRequest, dict]
)
def test_list_artifacts(request_type, transport: str = "grpc"):
    """The RPC sends the expected request and returns a pager with the token."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as stub:
        stub.return_value = metadata_service.ListArtifactsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_artifacts(request)
        # Exactly one RPC was made, carrying the default request message.
        assert len(stub.mock_calls) == 1
        assert stub.mock_calls[0].args[0] == metadata_service.ListArtifactsRequest()
    # The surface wraps the response in a pager exposing the page token.
    assert isinstance(response, pagers.ListArtifactsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_artifacts_empty_call():
    """A call with no request and no flattened fields sends the default request."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Patch the transport stub and invoke the method with no arguments.
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as stub:
        client.list_artifacts()
        stub.assert_called()
        # An empty invocation must serialize the default request type.
        sent = stub.mock_calls[0].args[0]
        assert sent == metadata_service.ListArtifactsRequest()
@pytest.mark.asyncio
async def test_list_artifacts_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.ListArtifactsRequest
):
    """Async variant: the RPC is issued and returns an async pager with the token."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional and the API is mocked.
    request = request_type()
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListArtifactsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_artifacts(request)
        # The stub was invoked with the default request message.
        assert len(stub.mock_calls)
        assert stub.mock_calls[0].args[0] == metadata_service.ListArtifactsRequest()
    # The surface wraps the response in an async pager exposing the token.
    assert isinstance(response, pagers.ListArtifactsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_artifacts_async_from_dict():
    """Exercise the async path with a plain dict in place of a request object."""
    await test_list_artifacts_async(request_type=dict)
def test_list_artifacts_field_headers():
    """The request's parent path is routed as an ``x-goog-request-params`` header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so a routing header must be emitted.
    request = metadata_service.ListArtifactsRequest()
    request.parent = "parent_value"
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as stub:
        stub.return_value = metadata_service.ListArtifactsResponse()
        client.list_artifacts(request)
        # Exactly one RPC, carrying the original request object.
        assert len(stub.mock_calls) == 1
        recorded = stub.mock_calls[0]
        assert recorded.args[0] == request
        # The routing header must name the parent resource.
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in recorded.kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_artifacts_field_headers_async():
    """Async: the parent path is routed as an ``x-goog-request-params`` header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so a routing header must be emitted.
    request = metadata_service.ListArtifactsRequest()
    request.parent = "parent_value"
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListArtifactsResponse()
        )
        await client.list_artifacts(request)
        # The stub received the original request object.
        assert len(stub.mock_calls)
        recorded = stub.mock_calls[0]
        assert recorded.args[0] == request
        # The routing header must name the parent resource.
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in recorded.kwargs["metadata"]
def test_list_artifacts_flattened():
    """Flattened keyword arguments should populate the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as stub:
        # Fake a successful (empty) response.
        stub.return_value = metadata_service.ListArtifactsResponse()
        # Invoke using only flattened keyword arguments.
        client.list_artifacts(
            parent="parent_value",
        )
        # A single RPC was made and the flattened value landed on the request.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0].parent == "parent_value"
def test_list_artifacts_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = metadata_service.ListArtifactsRequest()
    with pytest.raises(ValueError):
        client.list_artifacts(
            request,
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_artifacts_flattened_async():
    """Flattened kwargs should populate the request on the async client."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
        # Designate an appropriate return value for the call.  (The original
        # test first assigned a bare response that was immediately
        # overwritten; only the awaitable wrapper is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListArtifactsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_artifacts(
            parent="parent_value",
        )
        # Establish that the underlying call carried the flattened value.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_artifacts_flattened_error_async():
    """Mixing a request object with flattened kwargs must fail (async)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = metadata_service.ListArtifactsRequest()
    with pytest.raises(ValueError):
        await client.list_artifacts(
            request,
            parent="parent_value",
        )
def test_list_artifacts_pager(transport_name: str = "grpc"):
    """list_artifacts should return a pager that walks every page."""
    client = MetadataServiceClient(
        # Fix: pass a credentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[],
                next_page_token="def",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
            ),
            RuntimeError,
        )
        # The pager should carry the routing metadata for the request.
        # (Simplified from ``tuple(()) + (...)`` in the original.)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_artifacts(request={})
        assert pager._metadata == metadata
        # Drain the pager: 3 + 0 + 1 + 2 = 6 artifacts across the pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, artifact.Artifact) for i in results)
def test_list_artifacts_pages(transport_name: str = "grpc"):
    """The pager's ``.pages`` iterator should surface each raw page token."""
    client = MetadataServiceClient(
        # Fix: pass a credentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_artifacts), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[],
                next_page_token="def",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
            ),
            RuntimeError,
        )
        # The last page has no token, hence the trailing "".
        pages = list(client.list_artifacts(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_artifacts_async_pager():
    """The async pager should transparently iterate all artifacts."""
    client = MetadataServiceAsyncClient(
        # Fix: pass a credentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_artifacts), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[],
                next_page_token="def",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_artifacts(
            request={},
        )
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:  # pragma: no branch
            responses.append(response)
        # 3 + 0 + 1 + 2 = 6 artifacts across the pages.
        assert len(responses) == 6
        assert all(isinstance(i, artifact.Artifact) for i in responses)
@pytest.mark.asyncio
async def test_list_artifacts_async_pages():
    """The async pager's ``.pages`` should surface each raw page token."""
    client = MetadataServiceAsyncClient(
        # Fix: pass a credentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_artifacts), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[],
                next_page_token="def",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListArtifactsResponse(
                artifacts=[
                    artifact.Artifact(),
                    artifact.Artifact(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (
            await client.list_artifacts(request={})
        ).pages:  # pragma: no branch
            pages.append(page_)
        # The last page has no token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.UpdateArtifactRequest,
        dict,
    ],
)
def test_update_artifact(request_type, transport: str = "grpc"):
    """update_artifact should round-trip the mocked Artifact response."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional, so an empty request suffices.
    request = request_type()
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as stub:
        # Fake a fully-populated Artifact response.
        stub.return_value = gca_artifact.Artifact(
            name="name_value",
            display_name="display_name_value",
            uri="uri_value",
            etag="etag_value",
            state=gca_artifact.Artifact.State.PENDING,
            schema_title="schema_title_value",
            schema_version="schema_version_value",
            description="description_value",
        )
        response = client.update_artifact(request)
        # Exactly one RPC, carrying an empty UpdateArtifactRequest.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == metadata_service.UpdateArtifactRequest()
    # Every mocked field must round-trip onto the returned Artifact.
    assert isinstance(response, gca_artifact.Artifact)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.uri == "uri_value"
    assert response.etag == "etag_value"
    assert response.state == gca_artifact.Artifact.State.PENDING
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
def test_update_artifact_empty_call():
    """A call with no request and no flattened fields still issues an RPC."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as stub:
        client.update_artifact()
        stub.assert_called()
        # The client should have synthesized a default request message.
        _, args, _ = stub.mock_calls[0]
        assert args[0] == metadata_service.UpdateArtifactRequest()
@pytest.mark.asyncio
async def test_update_artifact_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.UpdateArtifactRequest
):
    """Async update_artifact should round-trip the mocked Artifact."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional, so an empty request suffices.
    request = request_type()
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as stub:
        # Wrap the fake response so it is awaitable.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_artifact.Artifact(
                name="name_value",
                display_name="display_name_value",
                uri="uri_value",
                etag="etag_value",
                state=gca_artifact.Artifact.State.PENDING,
                schema_title="schema_title_value",
                schema_version="schema_version_value",
                description="description_value",
            )
        )
        response = await client.update_artifact(request)
        # The stub was invoked with an empty UpdateArtifactRequest.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == metadata_service.UpdateArtifactRequest()
    # Every mocked field must round-trip onto the returned Artifact.
    assert isinstance(response, gca_artifact.Artifact)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.uri == "uri_value"
    assert response.etag == "etag_value"
    assert response.state == gca_artifact.Artifact.State.PENDING
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_artifact_async_from_dict():
    # Re-run the async test with a plain ``dict`` request to confirm the
    # client coerces dict-typed requests into UpdateArtifactRequest.
    await test_update_artifact_async(request_type=dict)
def test_update_artifact_field_headers():
    """Routing field headers must be attached for update_artifact."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so it must be echoed as a request header.
    request = metadata_service.UpdateArtifactRequest()
    request.artifact.name = "name_value"
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as stub:
        stub.return_value = gca_artifact.Artifact()
        client.update_artifact(request)
        # Exactly one RPC was made, carrying the original request object.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == request
    # The routing header must appear in the outbound call metadata.
    _, _, kw = stub.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "artifact.name=name_value",
    )
    assert expected_header in kw["metadata"]
@pytest.mark.asyncio
async def test_update_artifact_field_headers_async():
    """Routing field headers must also be sent by the async client."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so it must be echoed as a request header.
    request = metadata_service.UpdateArtifactRequest()
    request.artifact.name = "name_value"
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_artifact.Artifact()
        )
        await client.update_artifact(request)
        # The stub was invoked with the original request object.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == request
    # The routing header must appear in the outbound call metadata.
    _, _, kw = stub.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "artifact.name=name_value",
    )
    assert expected_header in kw["metadata"]
def test_update_artifact_flattened():
    """Flattened keyword arguments should populate the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as stub:
        # Fake a successful (empty) response.
        stub.return_value = gca_artifact.Artifact()
        # Invoke using only flattened keyword arguments.
        client.update_artifact(
            artifact=gca_artifact.Artifact(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Both flattened values landed on the single outgoing request.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0].artifact == gca_artifact.Artifact(name="name_value")
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_artifact_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = metadata_service.UpdateArtifactRequest()
    with pytest.raises(ValueError):
        client.update_artifact(
            request,
            artifact=gca_artifact.Artifact(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_artifact_flattened_async():
    """Flattened kwargs should populate the request on the async client."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.  (The original
        # test first assigned a bare response that was immediately
        # overwritten; only the awaitable wrapper is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_artifact.Artifact()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_artifact(
            artifact=gca_artifact.Artifact(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call carried the flattened values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].artifact
        mock_val = gca_artifact.Artifact(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_artifact_flattened_error_async():
    """Mixing a request object with flattened kwargs must fail (async)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = metadata_service.UpdateArtifactRequest()
    with pytest.raises(ValueError):
        await client.update_artifact(
            request,
            artifact=gca_artifact.Artifact(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.DeleteArtifactRequest,
        dict,
    ],
)
def test_delete_artifact(request_type, transport: str = "grpc"):
    """delete_artifact should surface a long-running-operation future."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional, so an empty request suffices.
    request = request_type()
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_artifact(request)
        # Exactly one RPC, carrying an empty DeleteArtifactRequest.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == metadata_service.DeleteArtifactRequest()
    # The LRO is handed back to the caller as a future.
    assert isinstance(response, future.Future)
def test_delete_artifact_empty_call():
    """A call with no request and no flattened fields still issues an RPC."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as stub:
        client.delete_artifact()
        stub.assert_called()
        # The client should have synthesized a default request message.
        _, args, _ = stub.mock_calls[0]
        assert args[0] == metadata_service.DeleteArtifactRequest()
@pytest.mark.asyncio
async def test_delete_artifact_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.DeleteArtifactRequest
):
    """Async delete_artifact should surface a long-running-operation future."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional, so an empty request suffices.
    request = request_type()
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as stub:
        # Wrap the fake operation so it is awaitable.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_artifact(request)
        # The stub was invoked with an empty DeleteArtifactRequest.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == metadata_service.DeleteArtifactRequest()
    # The LRO is handed back to the caller as a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_artifact_async_from_dict():
    # Re-run the async test with a plain ``dict`` request to confirm the
    # client coerces dict-typed requests into DeleteArtifactRequest.
    await test_delete_artifact_async(request_type=dict)
def test_delete_artifact_field_headers():
    """Routing field headers must be attached for delete_artifact."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so it must be echoed as a request header.
    request = metadata_service.DeleteArtifactRequest()
    request.name = "name_value"
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_artifact(request)
        # Exactly one RPC was made, carrying the original request object.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == request
    # The routing header must appear in the outbound call metadata.
    _, _, kw = stub.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_artifact_field_headers_async():
    """Routing field headers must also be sent by the async client."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so it must be echoed as a request header.
    request = metadata_service.DeleteArtifactRequest()
    request.name = "name_value"
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_artifact(request)
        # The stub was invoked with the original request object.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == request
    # The routing header must appear in the outbound call metadata.
    _, _, kw = stub.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in kw["metadata"]
def test_delete_artifact_flattened():
    """Flattened keyword arguments should populate the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as stub:
        # Fake a successful operation response.
        stub.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke using only flattened keyword arguments.
        client.delete_artifact(
            name="name_value",
        )
        # A single RPC was made and the flattened value landed on the request.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0].name == "name_value"
def test_delete_artifact_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = metadata_service.DeleteArtifactRequest()
    with pytest.raises(ValueError):
        client.delete_artifact(
            request,
            name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_artifact_flattened_async():
    """Flattened kwargs should populate the request on the async client."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_artifact), "__call__") as call:
        # Designate an appropriate return value for the call.  (The original
        # test first assigned a bare operation that was immediately
        # overwritten; only the awaitable wrapper is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_artifact(
            name="name_value",
        )
        # Establish that the underlying call carried the flattened value.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_artifact_flattened_error_async():
    """Mixing a request object with flattened kwargs must fail (async)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = metadata_service.DeleteArtifactRequest()
    with pytest.raises(ValueError):
        await client.delete_artifact(
            request,
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.PurgeArtifactsRequest,
        dict,
    ],
)
def test_purge_artifacts(request_type, transport: str = "grpc"):
    """purge_artifacts should surface a long-running-operation future."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional, so an empty request suffices.
    request = request_type()
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.purge_artifacts(request)
        # Exactly one RPC, carrying an empty PurgeArtifactsRequest.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == metadata_service.PurgeArtifactsRequest()
    # The LRO is handed back to the caller as a future.
    assert isinstance(response, future.Future)
def test_purge_artifacts_empty_call():
    """A call with no request and no flattened fields still issues an RPC."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as stub:
        client.purge_artifacts()
        stub.assert_called()
        # The client should have synthesized a default request message.
        _, args, _ = stub.mock_calls[0]
        assert args[0] == metadata_service.PurgeArtifactsRequest()
@pytest.mark.asyncio
async def test_purge_artifacts_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.PurgeArtifactsRequest
):
    """Async purge_artifacts should surface a long-running-operation future."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional, so an empty request suffices.
    request = request_type()
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as stub:
        # Wrap the fake operation so it is awaitable.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.purge_artifacts(request)
        # The stub was invoked with an empty PurgeArtifactsRequest.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == metadata_service.PurgeArtifactsRequest()
    # The LRO is handed back to the caller as a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_purge_artifacts_async_from_dict():
    # Re-run the async test with a plain ``dict`` request to confirm the
    # client coerces dict-typed requests into PurgeArtifactsRequest.
    await test_purge_artifacts_async(request_type=dict)
def test_purge_artifacts_field_headers():
    """Routing field headers must be attached for purge_artifacts."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so it must be echoed as a request header.
    request = metadata_service.PurgeArtifactsRequest()
    request.parent = "parent_value"
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.purge_artifacts(request)
        # Exactly one RPC was made, carrying the original request object.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == request
    # The routing header must appear in the outbound call metadata.
    _, _, kw = stub.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "parent=parent_value",
    )
    assert expected_header in kw["metadata"]
@pytest.mark.asyncio
async def test_purge_artifacts_field_headers_async():
    """Routing field headers must also be sent by the async client."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate the URI-bound field so it must be echoed as a request header.
    request = metadata_service.PurgeArtifactsRequest()
    request.parent = "parent_value"
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.purge_artifacts(request)
        # The stub was invoked with the original request object.
        assert len(stub.mock_calls)
        _, args, _ = stub.mock_calls[0]
        assert args[0] == request
    # The routing header must appear in the outbound call metadata.
    _, _, kw = stub.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "parent=parent_value",
    )
    assert expected_header in kw["metadata"]
def test_purge_artifacts_flattened():
    """Flattened keyword arguments should populate the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as stub:
        # Fake a successful operation response.
        stub.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke using only flattened keyword arguments.
        client.purge_artifacts(
            parent="parent_value",
        )
        # A single RPC was made and the flattened value landed on the request.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0].parent == "parent_value"
def test_purge_artifacts_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = metadata_service.PurgeArtifactsRequest()
    with pytest.raises(ValueError):
        client.purge_artifacts(
            request,
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_purge_artifacts_flattened_async():
    """Flattened kwargs should populate the request on the async client."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.purge_artifacts), "__call__") as call:
        # Designate an appropriate return value for the call.  (The original
        # test first assigned a bare operation that was immediately
        # overwritten; only the awaitable wrapper is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.purge_artifacts(
            parent="parent_value",
        )
        # Establish that the underlying call carried the flattened value.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_purge_artifacts_flattened_error_async():
    """Mixing a request object with flattened kwargs must fail (async)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = metadata_service.PurgeArtifactsRequest()
    with pytest.raises(ValueError):
        await client.purge_artifacts(
            request,
            parent="parent_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.CreateContextRequest,
        dict,
    ],
)
def test_create_context(request_type, transport: str = "grpc"):
    """create_context should round-trip the mocked Context response."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional, so an empty request suffices.
    request = request_type()
    # Patch the transport stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.create_context), "__call__") as stub:
        # Fake a fully-populated Context response.
        stub.return_value = gca_context.Context(
            name="name_value",
            display_name="display_name_value",
            etag="etag_value",
            parent_contexts=["parent_contexts_value"],
            schema_title="schema_title_value",
            schema_version="schema_version_value",
            description="description_value",
        )
        response = client.create_context(request)
        # Exactly one RPC, carrying an empty CreateContextRequest.
        assert len(stub.mock_calls) == 1
        _, args, _ = stub.mock_calls[0]
        assert args[0] == metadata_service.CreateContextRequest()
    # Every mocked field must round-trip onto the returned Context.
    assert isinstance(response, gca_context.Context)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
    assert response.parent_contexts == ["parent_contexts_value"]
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
def test_create_context_empty_call():
    """Calling create_context with no arguments still issues a default CreateContextRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        client.create_context()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.CreateContextRequest()
@pytest.mark.asyncio
async def test_create_context_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.CreateContextRequest
):
    """Async create_context sends the request and unwraps the awaited Context response."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_context.Context(
                name="name_value",
                display_name="display_name_value",
                etag="etag_value",
                parent_contexts=["parent_contexts_value"],
                schema_title="schema_title_value",
                schema_version="schema_version_value",
                description="description_value",
            )
        )
        response = await client.create_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.CreateContextRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_context.Context)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
    assert response.parent_contexts == ["parent_contexts_value"]
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_context_async_from_dict():
    """Re-run the async create_context test with a plain dict as the request type."""
    await test_create_context_async(request_type=dict)
def test_create_context_field_headers():
    """create_context must propagate request.parent as an x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.CreateContextRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        call.return_value = gca_context.Context()
        client.create_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_context_field_headers_async():
    """Async create_context must propagate request.parent as an x-goog-request-params header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.CreateContextRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
        await client.create_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
def test_create_context_flattened():
    """Flattened keyword args (parent, context, context_id) must be copied into the request."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_context.Context()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_context(
            parent="parent_value",
            context=gca_context.Context(name="name_value"),
            context_id="context_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].context
        mock_val = gca_context.Context(name="name_value")
        assert arg == mock_val
        arg = args[0].context_id
        mock_val = "context_id_value"
        assert arg == mock_val
def test_create_context_flattened_error():
    """create_context must raise ValueError when given both a request object and flattened fields."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_context(
            metadata_service.CreateContextRequest(),
            parent="parent_value",
            context=gca_context.Context(name="name_value"),
            context_id="context_id_value",
        )
@pytest.mark.asyncio
async def test_create_context_flattened_async():
    """Async flattened keyword args (parent, context, context_id) must be copied into the request."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A prior plain `call.return_value = gca_context.Context()` was a dead
        # store — immediately overwritten by the awaitable below — and is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_context(
            parent="parent_value",
            context=gca_context.Context(name="name_value"),
            context_id="context_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].context
        mock_val = gca_context.Context(name="name_value")
        assert arg == mock_val
        arg = args[0].context_id
        mock_val = "context_id_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_context_flattened_error_async():
    """Async create_context must raise ValueError when given both a request object and flattened fields."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_context(
            metadata_service.CreateContextRequest(),
            parent="parent_value",
            context=gca_context.Context(name="name_value"),
            context_id="context_id_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.GetContextRequest,
        dict,
    ],
)
def test_get_context(request_type, transport: str = "grpc"):
    """get_context sends a GetContextRequest and returns the stubbed Context unchanged."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = context.Context(
            name="name_value",
            display_name="display_name_value",
            etag="etag_value",
            parent_contexts=["parent_contexts_value"],
            schema_title="schema_title_value",
            schema_version="schema_version_value",
            description="description_value",
        )
        response = client.get_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.GetContextRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, context.Context)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
    assert response.parent_contexts == ["parent_contexts_value"]
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
def test_get_context_empty_call():
    """Calling get_context with no arguments still issues a default GetContextRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        client.get_context()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.GetContextRequest()
@pytest.mark.asyncio
async def test_get_context_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.GetContextRequest
):
    """Async get_context sends the request and unwraps the awaited Context response."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            context.Context(
                name="name_value",
                display_name="display_name_value",
                etag="etag_value",
                parent_contexts=["parent_contexts_value"],
                schema_title="schema_title_value",
                schema_version="schema_version_value",
                description="description_value",
            )
        )
        response = await client.get_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.GetContextRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, context.Context)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
    assert response.parent_contexts == ["parent_contexts_value"]
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_context_async_from_dict():
    """Re-run the async get_context test with a plain dict as the request type."""
    await test_get_context_async(request_type=dict)
def test_get_context_field_headers():
    """get_context must propagate request.name as an x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.GetContextRequest()
    request.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        call.return_value = context.Context()
        client.get_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_context_field_headers_async():
    """Async get_context must propagate request.name as an x-goog-request-params header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.GetContextRequest()
    request.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context())
        await client.get_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in kw["metadata"]
def test_get_context_flattened():
    """The flattened name keyword arg must be copied into the GetContextRequest."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = context.Context()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_context(
            name="name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_context_flattened_error():
    """get_context must raise ValueError when given both a request object and flattened fields."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_context(
            metadata_service.GetContextRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_get_context_flattened_async():
    """Async flattened name keyword arg must be copied into the GetContextRequest."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A prior plain `call.return_value = context.Context()` was a dead
        # store — immediately overwritten by the awaitable below — and is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_context(
            name="name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_context_flattened_error_async():
    """Async get_context must raise ValueError when given both a request object and flattened fields."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_context(
            metadata_service.GetContextRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.ListContextsRequest,
        dict,
    ],
)
def test_list_contexts(request_type, transport: str = "grpc"):
    """list_contexts sends a ListContextsRequest and wraps the response in a ListContextsPager."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = metadata_service.ListContextsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_contexts(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.ListContextsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListContextsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_contexts_empty_call():
    """Calling list_contexts with no arguments still issues a default ListContextsRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        client.list_contexts()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.ListContextsRequest()
@pytest.mark.asyncio
async def test_list_contexts_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.ListContextsRequest
):
    """Async list_contexts sends the request and wraps the response in a ListContextsAsyncPager."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListContextsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_contexts(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.ListContextsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListContextsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_contexts_async_from_dict():
    """Re-run the async list_contexts test with a plain dict as the request type."""
    await test_list_contexts_async(request_type=dict)
def test_list_contexts_field_headers():
    """list_contexts must propagate request.parent as an x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.ListContextsRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        call.return_value = metadata_service.ListContextsResponse()
        client.list_contexts(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_contexts_field_headers_async():
    """Async list_contexts must propagate request.parent as an x-goog-request-params header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.ListContextsRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListContextsResponse()
        )
        await client.list_contexts(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
def test_list_contexts_flattened():
    """The flattened parent keyword arg must be copied into the ListContextsRequest."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = metadata_service.ListContextsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_contexts(
            parent="parent_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_contexts_flattened_error():
    """list_contexts must raise ValueError when given both a request object and flattened fields."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_contexts(
            metadata_service.ListContextsRequest(),
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_contexts_flattened_async():
    """Async flattened parent keyword arg must be copied into the ListContextsRequest."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A prior plain `call.return_value = metadata_service.ListContextsResponse()`
        # was a dead store — immediately overwritten by the awaitable below — and
        # is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListContextsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_contexts(
            parent="parent_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_contexts_flattened_error_async():
    """Async list_contexts must raise ValueError when given both a request object and flattened fields."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_contexts(
            metadata_service.ListContextsRequest(),
            parent="parent_value",
        )
def test_list_contexts_pager(transport_name: str = "grpc"):
    """Iterating the ListContextsPager must transparently walk all pages (6 items across 3 non-empty pages)."""
    client = MetadataServiceClient(
        # NOTE(review): AnonymousCredentials is passed as a class here (no parens),
        # unlike the instantiated form used elsewhere in this file — presumably
        # tolerated by the client constructor; confirm or add `()`.
        credentials=ga_credentials.AnonymousCredentials,
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                    context.Context(),
                    context.Context(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListContextsResponse(
                contexts=[],
                next_page_token="def",
            ),
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                    context.Context(),
                ],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_contexts(request={})
        assert pager._metadata == metadata
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, context.Context) for i in results)
def test_list_contexts_pages(transport_name: str = "grpc"):
    """The pager's .pages iterator must yield raw pages with the expected next_page_tokens."""
    client = MetadataServiceClient(
        # NOTE(review): AnonymousCredentials is passed as a class here (no parens) —
        # presumably tolerated by the client constructor; confirm or add `()`.
        credentials=ga_credentials.AnonymousCredentials,
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_contexts), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                    context.Context(),
                    context.Context(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListContextsResponse(
                contexts=[],
                next_page_token="def",
            ),
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                    context.Context(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_contexts(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_contexts_async_pager():
    """Async-iterating the ListContextsAsyncPager must transparently walk all pages (6 items)."""
    client = MetadataServiceAsyncClient(
        # NOTE(review): AnonymousCredentials is passed as a class here (no parens) —
        # presumably tolerated by the client constructor; confirm or add `()`.
        credentials=ga_credentials.AnonymousCredentials,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_contexts), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                    context.Context(),
                    context.Context(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListContextsResponse(
                contexts=[],
                next_page_token="def",
            ),
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                    context.Context(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_contexts(
            request={},
        )
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:  # pragma: no branch
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, context.Context) for i in responses)
@pytest.mark.asyncio
async def test_list_contexts_async_pages():
    """The async pager's .pages iterator must yield raw pages with the expected next_page_tokens."""
    client = MetadataServiceAsyncClient(
        # NOTE(review): AnonymousCredentials is passed as a class here (no parens) —
        # presumably tolerated by the client constructor; confirm or add `()`.
        credentials=ga_credentials.AnonymousCredentials,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_contexts), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                    context.Context(),
                    context.Context(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListContextsResponse(
                contexts=[],
                next_page_token="def",
            ),
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListContextsResponse(
                contexts=[
                    context.Context(),
                    context.Context(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (
            await client.list_contexts(request={})
        ).pages:  # pragma: no branch
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.UpdateContextRequest,
        dict,
    ],
)
def test_update_context(request_type, transport: str = "grpc"):
    """update_context sends an UpdateContextRequest and returns the stubbed Context unchanged."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_context.Context(
            name="name_value",
            display_name="display_name_value",
            etag="etag_value",
            parent_contexts=["parent_contexts_value"],
            schema_title="schema_title_value",
            schema_version="schema_version_value",
            description="description_value",
        )
        response = client.update_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.UpdateContextRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_context.Context)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
    assert response.parent_contexts == ["parent_contexts_value"]
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
def test_update_context_empty_call():
    """Calling update_context with no arguments still issues a default UpdateContextRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        client.update_context()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.UpdateContextRequest()
@pytest.mark.asyncio
async def test_update_context_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.UpdateContextRequest
):
    """Async update_context sends the request and unwraps the awaited Context response."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_context.Context(
                name="name_value",
                display_name="display_name_value",
                etag="etag_value",
                parent_contexts=["parent_contexts_value"],
                schema_title="schema_title_value",
                schema_version="schema_version_value",
                description="description_value",
            )
        )
        response = await client.update_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.UpdateContextRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_context.Context)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.etag == "etag_value"
    assert response.parent_contexts == ["parent_contexts_value"]
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_context_async_from_dict():
    """Re-run the async update_context test with a plain dict as the request type."""
    await test_update_context_async(request_type=dict)
def test_update_context_field_headers():
    """update_context must propagate request.context.name as an x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.UpdateContextRequest()
    request.context.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        call.return_value = gca_context.Context()
        client.update_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "context.name=name_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_context_field_headers_async():
    """Async update_context must propagate request.context.name as an x-goog-request-params header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.UpdateContextRequest()
    request.context.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
        await client.update_context(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "context.name=name_value",
    ) in kw["metadata"]
def test_update_context_flattened():
    """Flattened kwargs to UpdateContext are folded into the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(type(client.transport.update_context), "__call__") as stub:
        stub.return_value = gca_context.Context()

        # Invoke with a truthy value for each flattened field.
        client.update_context(
            context=gca_context.Context(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # The outgoing request object was built from the flattened arguments.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0].context == gca_context.Context(name="name_value")
        assert sent_args[0].update_mask == field_mask_pb2.FieldMask(
            paths=["paths_value"]
        )
def test_update_context_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        client.update_context(
            metadata_service.UpdateContextRequest(),
            context=gca_context.Context(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_context_flattened_async():
    """Async flattened kwargs to UpdateContext are folded into the request.

    Fix: removed the dead `call.return_value = gca_context.Context()`
    assignment the generator emitted — it was immediately overwritten by the
    awaitable FakeUnaryUnaryCall assignment on the next line.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_context), "__call__") as call:
        # Designate an awaitable canned response for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_context(
            context=gca_context.Context(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].context == gca_context.Context(name="name_value")
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_context_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.update_context(
            metadata_service.UpdateContextRequest(),
            context=gca_context.Context(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.DeleteContextRequest,
        dict,
    ],
)
def test_delete_context(request_type, transport: str = "grpc"):
    """DeleteContext sends the default request and returns an LRO future."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the gRPC stub and supply a canned operation.
    with mock.patch.object(type(client.transport.delete_context), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_context(request)

        # Exactly one RPC went out, carrying the default request message.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.DeleteContextRequest()

    # The operation is surfaced to the caller as a future.
    assert isinstance(response, future.Future)
def test_delete_context_empty_call():
    """A call with no request and no flattened fields still sends the default message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    with mock.patch.object(type(client.transport.delete_context), "__call__") as stub:
        client.delete_context()
        stub.assert_called()
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.DeleteContextRequest()
@pytest.mark.asyncio
async def test_delete_context_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.DeleteContextRequest
):
    """Async DeleteContext sends the default request and returns an LRO future."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the gRPC stub and supply an awaitable canned operation.
    with mock.patch.object(type(client.transport.delete_context), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_context(request)

        # The RPC went out, carrying the default request message.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.DeleteContextRequest()

    # The operation is surfaced to the caller as a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_context_async_from_dict():
    """The async DeleteContext path also accepts a plain dict request."""
    await test_delete_context_async(request_type=dict)
def test_delete_context_field_headers():
    """DeleteContext echoes the URI-bound field as a routing header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the field that is part of the HTTP/1.1 URI so it must be
    # forwarded in the x-goog-request-params header.
    request = metadata_service.DeleteContextRequest()
    request.name = "name_value"

    with mock.patch.object(type(client.transport.delete_context), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_context(request)

        # Exactly one RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == request

    # The routing header carries the populated field.
    _, _, sent_kwargs = stub.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_context_field_headers_async():
    """Async DeleteContext echoes the URI-bound field as a routing header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the field that is part of the HTTP/1.1 URI so it must be
    # forwarded in the x-goog-request-params header.
    request = metadata_service.DeleteContextRequest()
    request.name = "name_value"

    with mock.patch.object(type(client.transport.delete_context), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_context(request)

        # The RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == request

    # The routing header carries the populated field.
    _, _, sent_kwargs = stub.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in sent_kwargs["metadata"]
def test_delete_context_flattened():
    """Flattened kwargs to DeleteContext are folded into the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(type(client.transport.delete_context), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")

        # Invoke with a truthy value for each flattened field.
        client.delete_context(
            name="name_value",
        )

        # The outgoing request object was built from the flattened arguments.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0].name == "name_value"
def test_delete_context_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        client.delete_context(
            metadata_service.DeleteContextRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_context_flattened_async():
    """Async flattened kwargs to DeleteContext are folded into the request.

    Fix: removed the dead `call.return_value = operations_pb2.Operation(...)`
    assignment the generator emitted — it was immediately overwritten by the
    awaitable FakeUnaryUnaryCall assignment that follows.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_context), "__call__") as call:
        # Designate an awaitable canned response for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_context(
            name="name_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_context_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.delete_context(
            metadata_service.DeleteContextRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.PurgeContextsRequest,
        dict,
    ],
)
def test_purge_contexts(request_type, transport: str = "grpc"):
    """PurgeContexts sends the default request and returns an LRO future."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the gRPC stub and supply a canned operation.
    with mock.patch.object(type(client.transport.purge_contexts), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.purge_contexts(request)

        # Exactly one RPC went out, carrying the default request message.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.PurgeContextsRequest()

    # The operation is surfaced to the caller as a future.
    assert isinstance(response, future.Future)
def test_purge_contexts_empty_call():
    """A call with no request and no flattened fields still sends the default message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    with mock.patch.object(type(client.transport.purge_contexts), "__call__") as stub:
        client.purge_contexts()
        stub.assert_called()
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.PurgeContextsRequest()
@pytest.mark.asyncio
async def test_purge_contexts_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.PurgeContextsRequest
):
    """Async PurgeContexts sends the default request and returns an LRO future."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the gRPC stub and supply an awaitable canned operation.
    with mock.patch.object(type(client.transport.purge_contexts), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.purge_contexts(request)

        # The RPC went out, carrying the default request message.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.PurgeContextsRequest()

    # The operation is surfaced to the caller as a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_purge_contexts_async_from_dict():
    """The async PurgeContexts path also accepts a plain dict request."""
    await test_purge_contexts_async(request_type=dict)
def test_purge_contexts_field_headers():
    """PurgeContexts echoes the URI-bound field as a routing header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the field that is part of the HTTP/1.1 URI so it must be
    # forwarded in the x-goog-request-params header.
    request = metadata_service.PurgeContextsRequest()
    request.parent = "parent_value"

    with mock.patch.object(type(client.transport.purge_contexts), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.purge_contexts(request)

        # Exactly one RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == request

    # The routing header carries the populated field.
    _, _, sent_kwargs = stub.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_purge_contexts_field_headers_async():
    """Async PurgeContexts echoes the URI-bound field as a routing header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the field that is part of the HTTP/1.1 URI so it must be
    # forwarded in the x-goog-request-params header.
    request = metadata_service.PurgeContextsRequest()
    request.parent = "parent_value"

    with mock.patch.object(type(client.transport.purge_contexts), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.purge_contexts(request)

        # The RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == request

    # The routing header carries the populated field.
    _, _, sent_kwargs = stub.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in sent_kwargs["metadata"]
def test_purge_contexts_flattened():
    """Flattened kwargs to PurgeContexts are folded into the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(type(client.transport.purge_contexts), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")

        # Invoke with a truthy value for each flattened field.
        client.purge_contexts(
            parent="parent_value",
        )

        # The outgoing request object was built from the flattened arguments.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0].parent == "parent_value"
def test_purge_contexts_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        client.purge_contexts(
            metadata_service.PurgeContextsRequest(),
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_purge_contexts_flattened_async():
    """Async flattened kwargs to PurgeContexts are folded into the request.

    Fix: removed the dead `call.return_value = operations_pb2.Operation(...)`
    assignment the generator emitted — it was immediately overwritten by the
    awaitable FakeUnaryUnaryCall assignment that follows.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.purge_contexts), "__call__") as call:
        # Designate an awaitable canned response for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.purge_contexts(
            parent="parent_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_purge_contexts_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.purge_contexts(
            metadata_service.PurgeContextsRequest(),
            parent="parent_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.AddContextArtifactsAndExecutionsRequest,
        dict,
    ],
)
def test_add_context_artifacts_and_executions(request_type, transport: str = "grpc"):
    """AddContextArtifactsAndExecutions sends the default request and returns its response type."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the gRPC stub and supply a canned response.
    with mock.patch.object(
        type(client.transport.add_context_artifacts_and_executions), "__call__"
    ) as stub:
        stub.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse()
        response = client.add_context_artifacts_and_executions(request)

        # Exactly one RPC went out, carrying the default request message.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()

    # The client hands back the expected response type.
    assert isinstance(
        response, metadata_service.AddContextArtifactsAndExecutionsResponse
    )
def test_add_context_artifacts_and_executions_empty_call():
    """A call with no request and no flattened fields still sends the default message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    with mock.patch.object(
        type(client.transport.add_context_artifacts_and_executions), "__call__"
    ) as stub:
        client.add_context_artifacts_and_executions()
        stub.assert_called()
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()
@pytest.mark.asyncio
async def test_add_context_artifacts_and_executions_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.AddContextArtifactsAndExecutionsRequest,
):
    """Async AddContextArtifactsAndExecutions sends the default request and returns its response type."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the gRPC stub and supply an awaitable canned response.
    with mock.patch.object(
        type(client.transport.add_context_artifacts_and_executions), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddContextArtifactsAndExecutionsResponse()
        )
        response = await client.add_context_artifacts_and_executions(request)

        # The RPC went out, carrying the default request message.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()

    # The client hands back the expected response type.
    assert isinstance(
        response, metadata_service.AddContextArtifactsAndExecutionsResponse
    )
@pytest.mark.asyncio
async def test_add_context_artifacts_and_executions_async_from_dict():
    """The async path also accepts a plain dict request."""
    await test_add_context_artifacts_and_executions_async(request_type=dict)
def test_add_context_artifacts_and_executions_field_headers():
    """AddContextArtifactsAndExecutions echoes the URI-bound field as a routing header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the field that is part of the HTTP/1.1 URI so it must be
    # forwarded in the x-goog-request-params header.
    request = metadata_service.AddContextArtifactsAndExecutionsRequest()
    request.context = "context_value"

    with mock.patch.object(
        type(client.transport.add_context_artifacts_and_executions), "__call__"
    ) as stub:
        stub.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse()
        client.add_context_artifacts_and_executions(request)

        # Exactly one RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == request

    # The routing header carries the populated field.
    _, _, sent_kwargs = stub.mock_calls[0]
    assert (
        "x-goog-request-params",
        "context=context_value",
    ) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_add_context_artifacts_and_executions_field_headers_async():
    """Async AddContextArtifactsAndExecutions echoes the URI-bound field as a routing header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the field that is part of the HTTP/1.1 URI so it must be
    # forwarded in the x-goog-request-params header.
    request = metadata_service.AddContextArtifactsAndExecutionsRequest()
    request.context = "context_value"

    with mock.patch.object(
        type(client.transport.add_context_artifacts_and_executions), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddContextArtifactsAndExecutionsResponse()
        )
        await client.add_context_artifacts_and_executions(request)

        # The RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == request

    # The routing header carries the populated field.
    _, _, sent_kwargs = stub.mock_calls[0]
    assert (
        "x-goog-request-params",
        "context=context_value",
    ) in sent_kwargs["metadata"]
def test_add_context_artifacts_and_executions_flattened():
    """Flattened kwargs are folded into the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
        type(client.transport.add_context_artifacts_and_executions), "__call__"
    ) as stub:
        stub.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse()

        # Invoke with a truthy value for each flattened field.
        client.add_context_artifacts_and_executions(
            context="context_value",
            artifacts=["artifacts_value"],
            executions=["executions_value"],
        )

        # The outgoing request object was built from the flattened arguments.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0].context == "context_value"
        assert sent_args[0].artifacts == ["artifacts_value"]
        assert sent_args[0].executions == ["executions_value"]
def test_add_context_artifacts_and_executions_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        client.add_context_artifacts_and_executions(
            metadata_service.AddContextArtifactsAndExecutionsRequest(),
            context="context_value",
            artifacts=["artifacts_value"],
            executions=["executions_value"],
        )
@pytest.mark.asyncio
async def test_add_context_artifacts_and_executions_flattened_async():
    """Async flattened kwargs are folded into the request.

    Fix: removed the dead `call.return_value = ...Response()` assignment the
    generator emitted — it was immediately overwritten by the awaitable
    FakeUnaryUnaryCall assignment that follows.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.add_context_artifacts_and_executions), "__call__"
    ) as call:
        # Designate an awaitable canned response for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddContextArtifactsAndExecutionsResponse()
        )

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.add_context_artifacts_and_executions(
            context="context_value",
            artifacts=["artifacts_value"],
            executions=["executions_value"],
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].context == "context_value"
        assert args[0].artifacts == ["artifacts_value"]
        assert args[0].executions == ["executions_value"]
@pytest.mark.asyncio
async def test_add_context_artifacts_and_executions_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.add_context_artifacts_and_executions(
            metadata_service.AddContextArtifactsAndExecutionsRequest(),
            context="context_value",
            artifacts=["artifacts_value"],
            executions=["executions_value"],
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.AddContextChildrenRequest,
        dict,
    ],
)
def test_add_context_children(request_type, transport: str = "grpc"):
    """AddContextChildren sends the default request and returns its response type."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the gRPC stub and supply a canned response.
    with mock.patch.object(
        type(client.transport.add_context_children), "__call__"
    ) as stub:
        stub.return_value = metadata_service.AddContextChildrenResponse()
        response = client.add_context_children(request)

        # Exactly one RPC went out, carrying the default request message.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.AddContextChildrenRequest()

    # The client hands back the expected response type.
    assert isinstance(response, metadata_service.AddContextChildrenResponse)
def test_add_context_children_empty_call():
    """A call with no request and no flattened fields still sends the default message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    with mock.patch.object(
        type(client.transport.add_context_children), "__call__"
    ) as stub:
        client.add_context_children()
        stub.assert_called()
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.AddContextChildrenRequest()
@pytest.mark.asyncio
async def test_add_context_children_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.AddContextChildrenRequest,
):
    """Async AddContextChildren sends the default request and returns its response type."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 treats every field as optional at runtime and the API is
    # mocked, so an empty request is sufficient.
    request = request_type()

    # Patch the gRPC stub and supply an awaitable canned response.
    with mock.patch.object(
        type(client.transport.add_context_children), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddContextChildrenResponse()
        )
        response = await client.add_context_children(request)

        # The RPC went out, carrying the default request message.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == metadata_service.AddContextChildrenRequest()

    # The client hands back the expected response type.
    assert isinstance(response, metadata_service.AddContextChildrenResponse)
@pytest.mark.asyncio
async def test_add_context_children_async_from_dict():
    """The async AddContextChildren path also accepts a plain dict request."""
    await test_add_context_children_async(request_type=dict)
def test_add_context_children_field_headers():
    """AddContextChildren echoes the URI-bound field as a routing header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the field that is part of the HTTP/1.1 URI so it must be
    # forwarded in the x-goog-request-params header.
    request = metadata_service.AddContextChildrenRequest()
    request.context = "context_value"

    with mock.patch.object(
        type(client.transport.add_context_children), "__call__"
    ) as stub:
        stub.return_value = metadata_service.AddContextChildrenResponse()
        client.add_context_children(request)

        # Exactly one RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == request

    # The routing header carries the populated field.
    _, _, sent_kwargs = stub.mock_calls[0]
    assert (
        "x-goog-request-params",
        "context=context_value",
    ) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_add_context_children_field_headers_async():
    """Async AddContextChildren echoes the URI-bound field as a routing header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the field that is part of the HTTP/1.1 URI so it must be
    # forwarded in the x-goog-request-params header.
    request = metadata_service.AddContextChildrenRequest()
    request.context = "context_value"

    with mock.patch.object(
        type(client.transport.add_context_children), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddContextChildrenResponse()
        )
        await client.add_context_children(request)

        # The RPC went out, carrying the request unchanged.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == request

    # The routing header carries the populated field.
    _, _, sent_kwargs = stub.mock_calls[0]
    assert (
        "x-goog-request-params",
        "context=context_value",
    ) in sent_kwargs["metadata"]
def test_add_context_children_flattened():
    """Flattened kwargs to AddContextChildren are folded into the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
        type(client.transport.add_context_children), "__call__"
    ) as stub:
        stub.return_value = metadata_service.AddContextChildrenResponse()

        # Invoke with a truthy value for each flattened field.
        client.add_context_children(
            context="context_value",
            child_contexts=["child_contexts_value"],
        )

        # The outgoing request object was built from the flattened arguments.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0].context == "context_value"
        assert sent_args[0].child_contexts == ["child_contexts_value"]
def test_add_context_children_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        client.add_context_children(
            metadata_service.AddContextChildrenRequest(),
            context="context_value",
            child_contexts=["child_contexts_value"],
        )
@pytest.mark.asyncio
async def test_add_context_children_flattened_async():
    """Async flattened kwargs to AddContextChildren are folded into the request.

    Fix: removed the dead `call.return_value = ...Response()` assignment the
    generator emitted — it was immediately overwritten by the awaitable
    FakeUnaryUnaryCall assignment that follows.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.add_context_children), "__call__"
    ) as call:
        # Designate an awaitable canned response for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddContextChildrenResponse()
        )

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.add_context_children(
            context="context_value",
            child_contexts=["child_contexts_value"],
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].context == "context_value"
        assert args[0].child_contexts == ["child_contexts_value"]
@pytest.mark.asyncio
async def test_add_context_children_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    async_client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The async surface enforces the same mutual-exclusion rule as the
    # sync client: a request message may not be combined with kwargs.
    with pytest.raises(ValueError):
        await async_client.add_context_children(
            metadata_service.AddContextChildrenRequest(),
            context="context_value",
            child_contexts=["child_contexts_value"],
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.RemoveContextChildrenRequest,
        dict,
    ],
)
def test_remove_context_children(request_type, transport: str = "grpc"):
    """RemoveContextChildren forwards the request and returns the response type."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.remove_context_children), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = metadata_service.RemoveContextChildrenResponse()
        response = client.remove_context_children(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.RemoveContextChildrenRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, metadata_service.RemoveContextChildrenResponse)
def test_remove_context_children_empty_call():
    """Calling with no arguments sends a default-constructed request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.remove_context_children), "__call__"
    ) as call:
        client.remove_context_children()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.RemoveContextChildrenRequest()
@pytest.mark.asyncio
async def test_remove_context_children_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.RemoveContextChildrenRequest,
):
    """Async RemoveContextChildren forwards the request and returns the response."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.remove_context_children), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.RemoveContextChildrenResponse()
        )
        response = await client.remove_context_children(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.RemoveContextChildrenRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, metadata_service.RemoveContextChildrenResponse)
@pytest.mark.asyncio
async def test_remove_context_children_async_from_dict():
    """Exercise the async path with a plain dict request."""
    await test_remove_context_children_async(request_type=dict)
def test_remove_context_children_field_headers():
    """Routing header is derived from ``request.context`` and sent as metadata."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.RemoveContextChildrenRequest()
    request.context = "context_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.remove_context_children), "__call__"
    ) as call:
        call.return_value = metadata_service.RemoveContextChildrenResponse()
        client.remove_context_children(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "context=context_value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_remove_context_children_field_headers_async():
    """Async variant: routing header is derived from ``request.context``."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.RemoveContextChildrenRequest()
    request.context = "context_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.remove_context_children), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.RemoveContextChildrenResponse()
        )
        await client.remove_context_children(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "context=context_value",
        ) in kw["metadata"]
def test_remove_context_children_flattened():
    """Flattened keyword args are copied into the outgoing request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.remove_context_children), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = metadata_service.RemoveContextChildrenResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.remove_context_children(
            context="context_value",
            child_contexts=["child_contexts_value"],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].context
        mock_val = "context_value"
        assert arg == mock_val
        arg = args[0].child_contexts
        mock_val = ["child_contexts_value"]
        assert arg == mock_val
def test_remove_context_children_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    grpc_client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a populated request message and individual keyword
    # fields is ambiguous, so the client rejects the combination.
    with pytest.raises(ValueError):
        grpc_client.remove_context_children(
            metadata_service.RemoveContextChildrenRequest(),
            context="context_value",
            child_contexts=["child_contexts_value"],
        )
@pytest.mark.asyncio
async def test_remove_context_children_flattened_async():
    """Flattened kwargs on the async client populate the outgoing request."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.remove_context_children), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  The async
        # transport awaits the stub, so the response must be wrapped in a
        # fake awaitable call.  (A redundant bare-response assignment that
        # was immediately overwritten has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.RemoveContextChildrenResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.remove_context_children(
            context="context_value",
            child_contexts=["child_contexts_value"],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].context
        mock_val = "context_value"
        assert arg == mock_val
        arg = args[0].child_contexts
        mock_val = ["child_contexts_value"]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_remove_context_children_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    async_client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The async surface enforces the same mutual-exclusion rule as the
    # sync client: a request message may not be combined with kwargs.
    with pytest.raises(ValueError):
        await async_client.remove_context_children(
            metadata_service.RemoveContextChildrenRequest(),
            context="context_value",
            child_contexts=["child_contexts_value"],
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.QueryContextLineageSubgraphRequest,
        dict,
    ],
)
def test_query_context_lineage_subgraph(request_type, transport: str = "grpc"):
    """QueryContextLineageSubgraph forwards the request and returns a LineageSubgraph."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_context_lineage_subgraph), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = lineage_subgraph.LineageSubgraph()
        response = client.query_context_lineage_subgraph(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, lineage_subgraph.LineageSubgraph)
def test_query_context_lineage_subgraph_empty_call():
    """Calling with no arguments sends a default-constructed request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_context_lineage_subgraph), "__call__"
    ) as call:
        client.query_context_lineage_subgraph()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
@pytest.mark.asyncio
async def test_query_context_lineage_subgraph_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.QueryContextLineageSubgraphRequest,
):
    """Async QueryContextLineageSubgraph forwards the request and returns the subgraph."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_context_lineage_subgraph), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            lineage_subgraph.LineageSubgraph()
        )
        response = await client.query_context_lineage_subgraph(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, lineage_subgraph.LineageSubgraph)
@pytest.mark.asyncio
async def test_query_context_lineage_subgraph_async_from_dict():
    """Exercise the async path with a plain dict request."""
    await test_query_context_lineage_subgraph_async(request_type=dict)
def test_query_context_lineage_subgraph_field_headers():
    """Routing header is derived from ``request.context`` and sent as metadata."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.QueryContextLineageSubgraphRequest()
    request.context = "context_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_context_lineage_subgraph), "__call__"
    ) as call:
        call.return_value = lineage_subgraph.LineageSubgraph()
        client.query_context_lineage_subgraph(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "context=context_value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_query_context_lineage_subgraph_field_headers_async():
    """Async variant: routing header is derived from ``request.context``."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.QueryContextLineageSubgraphRequest()
    request.context = "context_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_context_lineage_subgraph), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            lineage_subgraph.LineageSubgraph()
        )
        await client.query_context_lineage_subgraph(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "context=context_value",
        ) in kw["metadata"]
def test_query_context_lineage_subgraph_flattened():
    """Flattened keyword args are copied into the outgoing request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_context_lineage_subgraph), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = lineage_subgraph.LineageSubgraph()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.query_context_lineage_subgraph(
            context="context_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].context
        mock_val = "context_value"
        assert arg == mock_val
def test_query_context_lineage_subgraph_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    grpc_client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a populated request message and individual keyword
    # fields is ambiguous, so the client rejects the combination.
    with pytest.raises(ValueError):
        grpc_client.query_context_lineage_subgraph(
            metadata_service.QueryContextLineageSubgraphRequest(),
            context="context_value",
        )
@pytest.mark.asyncio
async def test_query_context_lineage_subgraph_flattened_async():
    """Flattened kwargs on the async client populate the outgoing request."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_context_lineage_subgraph), "__call__"
    ) as call:
        # Designate an appropriate return value for the call, wrapped for
        # the awaiting async transport.  (A redundant bare-response
        # assignment that was immediately overwritten has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            lineage_subgraph.LineageSubgraph()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.query_context_lineage_subgraph(
            context="context_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].context
        mock_val = "context_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_query_context_lineage_subgraph_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    async_client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The async surface enforces the same mutual-exclusion rule as the
    # sync client: a request message may not be combined with kwargs.
    with pytest.raises(ValueError):
        await async_client.query_context_lineage_subgraph(
            metadata_service.QueryContextLineageSubgraphRequest(),
            context="context_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.CreateExecutionRequest,
        dict,
    ],
)
def test_create_execution(request_type, transport: str = "grpc"):
    """CreateExecution forwards the request and maps every response field."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_execution.Execution(
            name="name_value",
            display_name="display_name_value",
            state=gca_execution.Execution.State.NEW,
            etag="etag_value",
            schema_title="schema_title_value",
            schema_version="schema_version_value",
            description="description_value",
        )
        response = client.create_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.CreateExecutionRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, gca_execution.Execution)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.state == gca_execution.Execution.State.NEW
        assert response.etag == "etag_value"
        assert response.schema_title == "schema_title_value"
        assert response.schema_version == "schema_version_value"
        assert response.description == "description_value"
def test_create_execution_empty_call():
    """Calling with no arguments sends a default-constructed request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        client.create_execution()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.CreateExecutionRequest()
@pytest.mark.asyncio
async def test_create_execution_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.CreateExecutionRequest,
):
    """Async CreateExecution forwards the request and maps every response field."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_execution.Execution(
                name="name_value",
                display_name="display_name_value",
                state=gca_execution.Execution.State.NEW,
                etag="etag_value",
                schema_title="schema_title_value",
                schema_version="schema_version_value",
                description="description_value",
            )
        )
        response = await client.create_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.CreateExecutionRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, gca_execution.Execution)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.state == gca_execution.Execution.State.NEW
        assert response.etag == "etag_value"
        assert response.schema_title == "schema_title_value"
        assert response.schema_version == "schema_version_value"
        assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_execution_async_from_dict():
    """Exercise the async path with a plain dict request."""
    await test_create_execution_async(request_type=dict)
def test_create_execution_field_headers():
    """Routing header is derived from ``request.parent`` and sent as metadata."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.CreateExecutionRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        call.return_value = gca_execution.Execution()
        client.create_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_execution_field_headers_async():
    """Async variant: routing header is derived from ``request.parent``."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.CreateExecutionRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_execution.Execution()
        )
        await client.create_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in kw["metadata"]
def test_create_execution_flattened():
    """Flattened keyword args are copied into the outgoing request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_execution.Execution()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_execution(
            parent="parent_value",
            execution=gca_execution.Execution(name="name_value"),
            execution_id="execution_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].execution
        mock_val = gca_execution.Execution(name="name_value")
        assert arg == mock_val
        arg = args[0].execution_id
        mock_val = "execution_id_value"
        assert arg == mock_val
def test_create_execution_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    grpc_client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a populated request message and individual keyword
    # fields is ambiguous, so the client rejects the combination.
    with pytest.raises(ValueError):
        grpc_client.create_execution(
            metadata_service.CreateExecutionRequest(),
            parent="parent_value",
            execution=gca_execution.Execution(name="name_value"),
            execution_id="execution_id_value",
        )
@pytest.mark.asyncio
async def test_create_execution_flattened_async():
    """Flattened kwargs on the async client populate the outgoing request."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        # Designate an appropriate return value for the call, wrapped for
        # the awaiting async transport.  (A redundant bare-response
        # assignment that was immediately overwritten has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_execution.Execution()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_execution(
            parent="parent_value",
            execution=gca_execution.Execution(name="name_value"),
            execution_id="execution_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].execution
        mock_val = gca_execution.Execution(name="name_value")
        assert arg == mock_val
        arg = args[0].execution_id
        mock_val = "execution_id_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_execution_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    async_client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The async surface enforces the same mutual-exclusion rule as the
    # sync client: a request message may not be combined with kwargs.
    with pytest.raises(ValueError):
        await async_client.create_execution(
            metadata_service.CreateExecutionRequest(),
            parent="parent_value",
            execution=gca_execution.Execution(name="name_value"),
            execution_id="execution_id_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.GetExecutionRequest,
        dict,
    ],
)
def test_get_execution(request_type, transport: str = "grpc"):
    """GetExecution forwards the request and maps every response field."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = execution.Execution(
            name="name_value",
            display_name="display_name_value",
            state=execution.Execution.State.NEW,
            etag="etag_value",
            schema_title="schema_title_value",
            schema_version="schema_version_value",
            description="description_value",
        )
        response = client.get_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.GetExecutionRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, execution.Execution)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.state == execution.Execution.State.NEW
        assert response.etag == "etag_value"
        assert response.schema_title == "schema_title_value"
        assert response.schema_version == "schema_version_value"
        assert response.description == "description_value"
def test_get_execution_empty_call():
    """Calling with no arguments sends a default-constructed request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        client.get_execution()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.GetExecutionRequest()
@pytest.mark.asyncio
async def test_get_execution_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.GetExecutionRequest
):
    """Async GetExecution forwards the request and maps every response field."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            execution.Execution(
                name="name_value",
                display_name="display_name_value",
                state=execution.Execution.State.NEW,
                etag="etag_value",
                schema_title="schema_title_value",
                schema_version="schema_version_value",
                description="description_value",
            )
        )
        response = await client.get_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.GetExecutionRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, execution.Execution)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.state == execution.Execution.State.NEW
        assert response.etag == "etag_value"
        assert response.schema_title == "schema_title_value"
        assert response.schema_version == "schema_version_value"
        assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_execution_async_from_dict():
    """Exercise the async path with a plain dict request."""
    await test_get_execution_async(request_type=dict)
def test_get_execution_field_headers():
    """Routing header is derived from ``request.name`` and sent as metadata."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.GetExecutionRequest()
    request.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        call.return_value = execution.Execution()
        client.get_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_execution_field_headers_async():
    """Async variant: routing header is derived from ``request.name``."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.GetExecutionRequest()
    request.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution())
        await client.get_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in kw["metadata"]
def test_get_execution_flattened():
    """Flattened keyword args are copied into the outgoing request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = execution.Execution()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_execution(
            name="name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_execution_flattened_error():
    """Passing a request object alongside flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        client.get_execution(
            metadata_service.GetExecutionRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_get_execution_flattened_async():
    """Async client: flattened keyword arguments are copied into the request.

    Fix: the original assigned a plain ``execution.Execution()`` to
    ``call.return_value`` and then immediately overwrote it with the
    awaitable fake; the dead first assignment has been removed.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_execution(
            name="name_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_execution_flattened_error_async():
    """Async client: request object plus flattened fields raises ValueError."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        await client.get_execution(
            metadata_service.GetExecutionRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.ListExecutionsRequest,
        dict,
    ],
)
def test_list_executions(request_type, transport: str = "grpc"):
    """Happy path: the request reaches the stub and the response is paged."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the underlying gRPC stub and fake its response.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as rpc:
        rpc.return_value = metadata_service.ListExecutionsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_executions(request)

        # Exactly one stub invocation with the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.ListExecutionsRequest()

    # The client wraps the raw response in a pager.
    assert isinstance(response, pagers.ListExecutionsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_executions_empty_call():
    """Coverage failsafe: calling with no request and no fields still works."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    # Patch the underlying gRPC stub and invoke with no arguments at all.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as rpc:
        client.list_executions()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.ListExecutionsRequest()
@pytest.mark.asyncio
async def test_list_executions_async(
    transport: str = "grpc_asyncio", request_type=metadata_service.ListExecutionsRequest
):
    """Async happy path: request reaches the stub, response is an async pager."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the underlying gRPC stub with an awaitable fake response.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListExecutionsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_executions(request)

        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.ListExecutionsRequest()

    # The async client wraps the raw response in an async pager.
    assert isinstance(response, pagers.ListExecutionsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_executions_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_list_executions_async(request_type=dict)
def test_list_executions_field_headers():
    """URI-bound request fields must be sent as routing headers."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header has to be emitted.
    request = metadata_service.ListExecutionsRequest()
    request.parent = "parent_value"

    # Patch the underlying gRPC stub and fake its response.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as rpc:
        rpc.return_value = metadata_service.ListExecutionsResponse()
        client.list_executions(request)

        # Exactly one stub invocation, carrying the request unchanged.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing header must be present in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_executions_field_headers_async():
    """Async client: URI-bound request fields must be sent as routing headers."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header has to be emitted.
    request = metadata_service.ListExecutionsRequest()
    request.parent = "parent_value"

    # Patch the underlying gRPC stub with an awaitable fake response.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListExecutionsResponse()
        )
        await client.list_executions(request)

        # The stub was invoked with the request passed through unchanged.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing header must be present in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in call_kwargs["metadata"]
def test_list_executions_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Patch the underlying gRPC stub and fake its response.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as rpc:
        rpc.return_value = metadata_service.ListExecutionsResponse()
        # Pass each flattened field as a truthy keyword argument.
        client.list_executions(
            parent="parent_value",
        )

        # Exactly one underlying call, carrying the expected field value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].parent == "parent_value"
def test_list_executions_flattened_error():
    """Passing a request object alongside flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        client.list_executions(
            metadata_service.ListExecutionsRequest(),
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_executions_flattened_async():
    """Async client: flattened keyword arguments are copied into the request.

    Fix: the original assigned a plain ``ListExecutionsResponse()`` to
    ``call.return_value`` and then immediately overwrote it with the
    awaitable fake; the dead first assignment has been removed.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListExecutionsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_executions(
            parent="parent_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_executions_flattened_error_async():
    """Async client: request object plus flattened fields raises ValueError."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        await client.list_executions(
            metadata_service.ListExecutionsRequest(),
            parent="parent_value",
        )
def test_list_executions_pager(transport_name: str = "grpc"):
    """Iterating the pager yields every Execution across all mocked pages."""
    client = MetadataServiceClient(
        # NOTE(review): the credentials class (not an instance) is passed here,
        # matching the sibling pager tests — confirm this is intentional.
        credentials=ga_credentials.AnonymousCredentials,
        transport=transport_name,
    )

    # Patch the underlying gRPC stub and fake the request.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as rpc:
        # Four pages holding 3, 0, 1 and 2 executions, then exhaustion.
        rpc.side_effect = (
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                    execution.Execution(),
                    execution.Execution(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[],
                next_page_token="def",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                    execution.Execution(),
                ],
            ),
            RuntimeError,
        )

        # The pager must carry the routing metadata for the (empty) parent.
        expected_metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_executions(request={})
        assert pager._metadata == expected_metadata

        # All six executions across the four pages come back in order.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(item, execution.Execution) for item in results)
def test_list_executions_pages(transport_name: str = "grpc"):
    """Iterating by page yields each raw page with its next_page_token."""
    client = MetadataServiceClient(
        # NOTE(review): the credentials class (not an instance) is passed here,
        # matching the sibling pager tests — confirm this is intentional.
        credentials=ga_credentials.AnonymousCredentials,
        transport=transport_name,
    )

    # Patch the underlying gRPC stub and fake the request.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as rpc:
        # Four pages holding 3, 0, 1 and 2 executions, then exhaustion.
        rpc.side_effect = (
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                    execution.Execution(),
                    execution.Execution(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[],
                next_page_token="def",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                    execution.Execution(),
                ],
            ),
            RuntimeError,
        )

        # Each page's raw token must match the mocked sequence (last is empty).
        fetched_pages = list(client.list_executions(request={}).pages)
        for page, expected_token in zip(fetched_pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == expected_token
@pytest.mark.asyncio
async def test_list_executions_async_pager():
    """Async iteration over the pager yields every Execution across pages."""
    client = MetadataServiceAsyncClient(
        # NOTE(review): the credentials class (not an instance) is passed here,
        # matching the sibling pager tests — confirm this is intentional.
        credentials=ga_credentials.AnonymousCredentials,
    )

    # Patch the stub with an AsyncMock so each paged response is awaitable.
    with mock.patch.object(
        type(client.transport.list_executions), "__call__", new_callable=mock.AsyncMock
    ) as rpc:
        # Four pages holding 3, 0, 1 and 2 executions, then exhaustion.
        rpc.side_effect = (
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                    execution.Execution(),
                    execution.Execution(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[],
                next_page_token="def",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                    execution.Execution(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_executions(
            request={},
        )
        assert async_pager.next_page_token == "abc"

        # Drain the async iterator; all six executions must come back.
        collected = []
        async for item in async_pager:  # pragma: no branch
            collected.append(item)

        assert len(collected) == 6
        assert all(isinstance(item, execution.Execution) for item in collected)
@pytest.mark.asyncio
async def test_list_executions_async_pages():
    """Async iteration by page yields each raw page with its next_page_token."""
    client = MetadataServiceAsyncClient(
        # NOTE(review): the credentials class (not an instance) is passed here,
        # matching the sibling pager tests — confirm this is intentional.
        credentials=ga_credentials.AnonymousCredentials,
    )

    # Patch the stub with an AsyncMock so each paged response is awaitable.
    with mock.patch.object(
        type(client.transport.list_executions), "__call__", new_callable=mock.AsyncMock
    ) as rpc:
        # Four pages holding 3, 0, 1 and 2 executions, then exhaustion.
        rpc.side_effect = (
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                    execution.Execution(),
                    execution.Execution(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[],
                next_page_token="def",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListExecutionsResponse(
                executions=[
                    execution.Execution(),
                    execution.Execution(),
                ],
            ),
            RuntimeError,
        )
        fetched_pages = []
        async for page in (
            await client.list_executions(request={})
        ).pages:  # pragma: no branch
            fetched_pages.append(page)
        # Each page's raw token must match the mocked sequence (last is empty).
        for page, expected_token in zip(fetched_pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == expected_token
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.UpdateExecutionRequest,
        dict,
    ],
)
def test_update_execution(request_type, transport: str = "grpc"):
    """Happy path: the request reaches the stub and Execution fields round-trip."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the underlying gRPC stub and fake a fully-populated response.
    with mock.patch.object(type(client.transport.update_execution), "__call__") as rpc:
        rpc.return_value = gca_execution.Execution(
            name="name_value",
            display_name="display_name_value",
            state=gca_execution.Execution.State.NEW,
            etag="etag_value",
            schema_title="schema_title_value",
            schema_version="schema_version_value",
            description="description_value",
        )
        response = client.update_execution(request)

        # Exactly one stub invocation with the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.UpdateExecutionRequest()

    # Every faked field must round-trip onto the returned Execution.
    assert isinstance(response, gca_execution.Execution)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == gca_execution.Execution.State.NEW
    assert response.etag == "etag_value"
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
def test_update_execution_empty_call():
    """Coverage failsafe: calling with no request and no fields still works."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    # Patch the underlying gRPC stub and invoke with no arguments at all.
    with mock.patch.object(type(client.transport.update_execution), "__call__") as rpc:
        client.update_execution()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.UpdateExecutionRequest()
@pytest.mark.asyncio
async def test_update_execution_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.UpdateExecutionRequest,
):
    """Async happy path: request reaches the stub, Execution fields round-trip."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the underlying gRPC stub with an awaitable fake response.
    with mock.patch.object(type(client.transport.update_execution), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_execution.Execution(
                name="name_value",
                display_name="display_name_value",
                state=gca_execution.Execution.State.NEW,
                etag="etag_value",
                schema_title="schema_title_value",
                schema_version="schema_version_value",
                description="description_value",
            )
        )
        response = await client.update_execution(request)

        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.UpdateExecutionRequest()

    # Every faked field must round-trip onto the returned Execution.
    assert isinstance(response, gca_execution.Execution)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == gca_execution.Execution.State.NEW
    assert response.etag == "etag_value"
    assert response.schema_title == "schema_title_value"
    assert response.schema_version == "schema_version_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_execution_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_update_execution_async(request_type=dict)
def test_update_execution_field_headers():
    """URI-bound request fields must be sent as routing headers."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound (nested) field so a routing header is emitted.
    request = metadata_service.UpdateExecutionRequest()
    request.execution.name = "name_value"

    # Patch the underlying gRPC stub and fake its response.
    with mock.patch.object(type(client.transport.update_execution), "__call__") as rpc:
        rpc.return_value = gca_execution.Execution()
        client.update_execution(request)

        # Exactly one stub invocation, carrying the request unchanged.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing header must be present in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert (
        "x-goog-request-params",
        "execution.name=name_value",
    ) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_update_execution_field_headers_async():
    """Async client: URI-bound request fields must be sent as routing headers."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound (nested) field so a routing header is emitted.
    request = metadata_service.UpdateExecutionRequest()
    request.execution.name = "name_value"

    # Patch the underlying gRPC stub with an awaitable fake response.
    with mock.patch.object(type(client.transport.update_execution), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_execution.Execution()
        )
        await client.update_execution(request)

        # The stub was invoked with the request passed through unchanged.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing header must be present in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert (
        "x-goog-request-params",
        "execution.name=name_value",
    ) in call_kwargs["metadata"]
def test_update_execution_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Patch the underlying gRPC stub and fake its response.
    with mock.patch.object(type(client.transport.update_execution), "__call__") as rpc:
        rpc.return_value = gca_execution.Execution()
        # Pass each flattened field as a truthy keyword argument.
        client.update_execution(
            execution=gca_execution.Execution(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Exactly one underlying call, carrying the expected field values.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].execution == gca_execution.Execution(name="name_value")
        assert call_args[0].update_mask == field_mask_pb2.FieldMask(
            paths=["paths_value"]
        )
def test_update_execution_flattened_error():
    """Passing a request object alongside flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        client.update_execution(
            metadata_service.UpdateExecutionRequest(),
            execution=gca_execution.Execution(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_execution_flattened_async():
    """Async client: flattened keyword arguments are copied into the request.

    Fix: the original assigned a plain ``gca_execution.Execution()`` to
    ``call.return_value`` and then immediately overwrote it with the
    awaitable fake; the dead first assignment has been removed.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_execution), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_execution.Execution()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_execution(
            execution=gca_execution.Execution(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].execution
        mock_val = gca_execution.Execution(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_execution_flattened_error_async():
    """Async client: request object plus flattened fields raises ValueError."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        await client.update_execution(
            metadata_service.UpdateExecutionRequest(),
            execution=gca_execution.Execution(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.DeleteExecutionRequest,
        dict,
    ],
)
def test_delete_execution(request_type, transport: str = "grpc"):
    """Happy path: the request reaches the stub and an LRO future is returned."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the underlying gRPC stub and fake a long-running operation.
    with mock.patch.object(type(client.transport.delete_execution), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_execution(request)

        # Exactly one stub invocation with the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.DeleteExecutionRequest()

    # The client wraps the long-running operation in a future.
    assert isinstance(response, future.Future)
def test_delete_execution_empty_call():
    """Coverage failsafe: calling with no request and no fields still works."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    # Patch the underlying gRPC stub and invoke with no arguments at all.
    with mock.patch.object(type(client.transport.delete_execution), "__call__") as rpc:
        client.delete_execution()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.DeleteExecutionRequest()
@pytest.mark.asyncio
async def test_delete_execution_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.DeleteExecutionRequest,
):
    """Async happy path: request reaches the stub, an LRO future is returned."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the underlying gRPC stub with an awaitable fake operation.
    with mock.patch.object(type(client.transport.delete_execution), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_execution(request)

        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.DeleteExecutionRequest()

    # The async client wraps the long-running operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_execution_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_delete_execution_async(request_type=dict)
def test_delete_execution_field_headers():
    """URI-bound request fields must be sent as routing headers."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header has to be emitted.
    request = metadata_service.DeleteExecutionRequest()
    request.name = "name_value"

    # Patch the underlying gRPC stub and fake its response.
    with mock.patch.object(type(client.transport.delete_execution), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_execution(request)

        # Exactly one stub invocation, carrying the request unchanged.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing header must be present in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_execution_field_headers_async():
    """Async client: URI-bound request fields must be sent as routing headers."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate the URI-bound field so a routing header has to be emitted.
    request = metadata_service.DeleteExecutionRequest()
    request.name = "name_value"

    # Patch the underlying gRPC stub with an awaitable fake operation.
    with mock.patch.object(type(client.transport.delete_execution), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_execution(request)

        # The stub was invoked with the request passed through unchanged.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

    # The routing header must be present in the outgoing metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in call_kwargs["metadata"]
def test_delete_execution_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Patch the underlying gRPC stub and fake its response.
    with mock.patch.object(type(client.transport.delete_execution), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        # Pass each flattened field as a truthy keyword argument.
        client.delete_execution(
            name="name_value",
        )

        # Exactly one underlying call, carrying the expected field value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_delete_execution_flattened_error():
    """Passing a request object alongside flattened fields raises ValueError."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        client.delete_execution(
            metadata_service.DeleteExecutionRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_execution_flattened_async():
    """Async client: flattened keyword arguments are copied into the request.

    Fix: the original assigned a plain ``operations_pb2.Operation`` to
    ``call.return_value`` and then immediately overwrote it with the
    awaitable fake; the dead first assignment has been removed.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_execution), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_execution(
            name="name_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_execution_flattened_error_async():
    """Async client: request object plus flattened fields raises ValueError."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # A request object and flattened fields are mutually exclusive.
    with pytest.raises(ValueError):
        await client.delete_execution(
            metadata_service.DeleteExecutionRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.PurgeExecutionsRequest,
        dict,
    ],
)
def test_purge_executions(request_type, transport: str = "grpc"):
    """Happy path: the request reaches the stub and an LRO future is returned."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the underlying gRPC stub and fake a long-running operation.
    with mock.patch.object(type(client.transport.purge_executions), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.purge_executions(request)

        # Exactly one stub invocation with the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.PurgeExecutionsRequest()

    # The client wraps the long-running operation in a future.
    assert isinstance(response, future.Future)
def test_purge_executions_empty_call():
    """Coverage failsafe: calling with no request and no fields still works."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    # Patch the underlying gRPC stub and invoke with no arguments at all.
    with mock.patch.object(type(client.transport.purge_executions), "__call__") as rpc:
        client.purge_executions()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.PurgeExecutionsRequest()
@pytest.mark.asyncio
async def test_purge_executions_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.PurgeExecutionsRequest,
):
    """Async happy path: request reaches the stub, an LRO future is returned."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()

    # Patch the underlying gRPC stub with an awaitable fake operation.
    with mock.patch.object(type(client.transport.purge_executions), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.purge_executions(request)

        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == metadata_service.PurgeExecutionsRequest()

    # The async client wraps the long-running operation in a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_purge_executions_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_purge_executions_async(request_type=dict)
def test_purge_executions_field_headers():
    """Fields embedded in the request URI must be sent as routing headers."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-embedded field with a non-empty value.
    request = metadata_service.PurgeExecutionsRequest()
    request.parent = "parent_value"
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.purge_executions), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.purge_executions(request)
        # Exactly one RPC, carrying our request.
        assert len(stub.mock_calls) == 1
        args = stub.mock_calls[0].args
        assert args[0] == request
        # The routing header must be attached to the call metadata.
        kwargs = stub.mock_calls[0].kwargs
        assert ("x-goog-request-params", "parent=parent_value") in kwargs["metadata"]
@pytest.mark.asyncio
async def test_purge_executions_field_headers_async():
    """Routing headers for URI-embedded fields must be sent (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-embedded field with a non-empty value.
    request = metadata_service.PurgeExecutionsRequest()
    request.parent = "parent_value"
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.purge_executions), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.purge_executions(request)
        # The stub must have been invoked, carrying our request.
        assert len(stub.mock_calls)
        args = stub.mock_calls[0].args
        assert args[0] == request
        # The routing header must be attached to the call metadata.
        kwargs = stub.mock_calls[0].kwargs
        assert ("x-goog-request-params", "parent=parent_value") in kwargs["metadata"]
def test_purge_executions_flattened():
    """Flattened keyword arguments must be packed into the request proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.purge_executions), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke with a truthy value for each flattened field.
        client.purge_executions(
            parent="parent_value",
        )
        # Exactly one RPC; its request must carry the flattened value.
        assert len(stub.mock_calls) == 1
        sent_request = stub.mock_calls[0].args[0]
        assert sent_request.parent == "parent_value"
def test_purge_executions_flattened_error():
    """Passing both a request object and flattened fields must raise."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client.purge_executions(
            metadata_service.PurgeExecutionsRequest(),
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_purge_executions_flattened_async():
    """Flattened kwargs must be packed into the request (async client).

    Fix: the original assigned a synchronous ``Operation`` to
    ``call.return_value`` and then immediately overwrote it with the async
    fake call; the dead first assignment is removed.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.purge_executions), "__call__") as call:
        # Designate the awaited return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.purge_executions(
            parent="parent_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_purge_executions_flattened_error_async():
    """Passing both a request object and flattened fields must raise (async)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.purge_executions(
            metadata_service.PurgeExecutionsRequest(),
            parent="parent_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.AddExecutionEventsRequest,
        dict,
    ],
)
def test_add_execution_events(request_type, transport: str = "grpc"):
    """Exercise add_execution_events over gRPC with an empty request."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.add_execution_events), "__call__"
    ) as stub:
        stub.return_value = metadata_service.AddExecutionEventsResponse()
        response = client.add_execution_events(request)
        # Exactly one RPC, carrying the default request.
        assert len(stub.mock_calls) == 1
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.AddExecutionEventsRequest()
    # The response surfaces as the expected proto type.
    assert isinstance(response, metadata_service.AddExecutionEventsResponse)
def test_add_execution_events_empty_call():
    """A call with no request and no flattened fields must still succeed."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.add_execution_events), "__call__"
    ) as stub:
        client.add_execution_events()
        stub.assert_called()
        # The stub must have received a default-constructed request.
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.AddExecutionEventsRequest()
@pytest.mark.asyncio
async def test_add_execution_events_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.AddExecutionEventsRequest,
):
    """Exercise add_execution_events over the async transport."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.add_execution_events), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddExecutionEventsResponse()
        )
        response = await client.add_execution_events(request)
        # The stub must have been invoked, carrying the default request.
        assert len(stub.mock_calls)
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.AddExecutionEventsRequest()
    # The response surfaces as the expected proto type.
    assert isinstance(response, metadata_service.AddExecutionEventsResponse)
@pytest.mark.asyncio
async def test_add_execution_events_async_from_dict():
    """Re-run the async test with a dict-typed request."""
    await test_add_execution_events_async(request_type=dict)
def test_add_execution_events_field_headers():
    """Fields embedded in the request URI must be sent as routing headers."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-embedded field with a non-empty value.
    request = metadata_service.AddExecutionEventsRequest()
    request.execution = "execution_value"
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.add_execution_events), "__call__"
    ) as stub:
        stub.return_value = metadata_service.AddExecutionEventsResponse()
        client.add_execution_events(request)
        # Exactly one RPC, carrying our request.
        assert len(stub.mock_calls) == 1
        args = stub.mock_calls[0].args
        assert args[0] == request
        # The routing header must be attached to the call metadata.
        kwargs = stub.mock_calls[0].kwargs
        assert ("x-goog-request-params", "execution=execution_value") in kwargs[
            "metadata"
        ]
@pytest.mark.asyncio
async def test_add_execution_events_field_headers_async():
    """Routing headers for URI-embedded fields must be sent (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-embedded field with a non-empty value.
    request = metadata_service.AddExecutionEventsRequest()
    request.execution = "execution_value"
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.add_execution_events), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddExecutionEventsResponse()
        )
        await client.add_execution_events(request)
        # The stub must have been invoked, carrying our request.
        assert len(stub.mock_calls)
        args = stub.mock_calls[0].args
        assert args[0] == request
        # The routing header must be attached to the call metadata.
        kwargs = stub.mock_calls[0].kwargs
        assert ("x-goog-request-params", "execution=execution_value") in kwargs[
            "metadata"
        ]
def test_add_execution_events_flattened():
    """Flattened keyword arguments must be packed into the request proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.add_execution_events), "__call__"
    ) as stub:
        stub.return_value = metadata_service.AddExecutionEventsResponse()
        # Invoke with a truthy value for each flattened field.
        client.add_execution_events(
            execution="execution_value",
            events=[event.Event(artifact="artifact_value")],
        )
        # Exactly one RPC; its request must carry the flattened values.
        assert len(stub.mock_calls) == 1
        sent_request = stub.mock_calls[0].args[0]
        assert sent_request.execution == "execution_value"
        assert sent_request.events == [event.Event(artifact="artifact_value")]
def test_add_execution_events_flattened_error():
    """Passing both a request object and flattened fields must raise."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client.add_execution_events(
            metadata_service.AddExecutionEventsRequest(),
            execution="execution_value",
            events=[event.Event(artifact="artifact_value")],
        )
@pytest.mark.asyncio
async def test_add_execution_events_flattened_async():
    """Flattened kwargs must be packed into the request (async client).

    Fix: the original assigned a synchronous response to
    ``call.return_value`` and then immediately overwrote it with the async
    fake call; the dead first assignment is removed.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.add_execution_events), "__call__"
    ) as call:
        # Designate the awaited return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.AddExecutionEventsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.add_execution_events(
            execution="execution_value",
            events=[event.Event(artifact="artifact_value")],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].execution == "execution_value"
        assert args[0].events == [event.Event(artifact="artifact_value")]
@pytest.mark.asyncio
async def test_add_execution_events_flattened_error_async():
    """Passing both a request object and flattened fields must raise (async)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.add_execution_events(
            metadata_service.AddExecutionEventsRequest(),
            execution="execution_value",
            events=[event.Event(artifact="artifact_value")],
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.QueryExecutionInputsAndOutputsRequest,
        dict,
    ],
)
def test_query_execution_inputs_and_outputs(request_type, transport: str = "grpc"):
    """Exercise query_execution_inputs_and_outputs over gRPC."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.query_execution_inputs_and_outputs), "__call__"
    ) as stub:
        stub.return_value = lineage_subgraph.LineageSubgraph()
        response = client.query_execution_inputs_and_outputs(request)
        # Exactly one RPC, carrying the default request.
        assert len(stub.mock_calls) == 1
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
    # The response surfaces as the expected proto type.
    assert isinstance(response, lineage_subgraph.LineageSubgraph)
def test_query_execution_inputs_and_outputs_empty_call():
    """A call with no request and no flattened fields must still succeed."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.query_execution_inputs_and_outputs), "__call__"
    ) as stub:
        client.query_execution_inputs_and_outputs()
        stub.assert_called()
        # The stub must have received a default-constructed request.
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
@pytest.mark.asyncio
async def test_query_execution_inputs_and_outputs_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.QueryExecutionInputsAndOutputsRequest,
):
    """Exercise query_execution_inputs_and_outputs over the async transport."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.query_execution_inputs_and_outputs), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            lineage_subgraph.LineageSubgraph()
        )
        response = await client.query_execution_inputs_and_outputs(request)
        # The stub must have been invoked, carrying the default request.
        assert len(stub.mock_calls)
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
    # The response surfaces as the expected proto type.
    assert isinstance(response, lineage_subgraph.LineageSubgraph)
@pytest.mark.asyncio
async def test_query_execution_inputs_and_outputs_async_from_dict():
    """Re-run the async test with a dict-typed request."""
    await test_query_execution_inputs_and_outputs_async(request_type=dict)
def test_query_execution_inputs_and_outputs_field_headers():
    """Fields embedded in the request URI must be sent as routing headers."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-embedded field with a non-empty value.
    request = metadata_service.QueryExecutionInputsAndOutputsRequest()
    request.execution = "execution_value"
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.query_execution_inputs_and_outputs), "__call__"
    ) as stub:
        stub.return_value = lineage_subgraph.LineageSubgraph()
        client.query_execution_inputs_and_outputs(request)
        # Exactly one RPC, carrying our request.
        assert len(stub.mock_calls) == 1
        args = stub.mock_calls[0].args
        assert args[0] == request
        # The routing header must be attached to the call metadata.
        kwargs = stub.mock_calls[0].kwargs
        assert ("x-goog-request-params", "execution=execution_value") in kwargs[
            "metadata"
        ]
@pytest.mark.asyncio
async def test_query_execution_inputs_and_outputs_field_headers_async():
    """Routing headers for URI-embedded fields must be sent (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-embedded field with a non-empty value.
    request = metadata_service.QueryExecutionInputsAndOutputsRequest()
    request.execution = "execution_value"
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.query_execution_inputs_and_outputs), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            lineage_subgraph.LineageSubgraph()
        )
        await client.query_execution_inputs_and_outputs(request)
        # The stub must have been invoked, carrying our request.
        assert len(stub.mock_calls)
        args = stub.mock_calls[0].args
        assert args[0] == request
        # The routing header must be attached to the call metadata.
        kwargs = stub.mock_calls[0].kwargs
        assert ("x-goog-request-params", "execution=execution_value") in kwargs[
            "metadata"
        ]
def test_query_execution_inputs_and_outputs_flattened():
    """Flattened keyword arguments must be packed into the request proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.query_execution_inputs_and_outputs), "__call__"
    ) as stub:
        stub.return_value = lineage_subgraph.LineageSubgraph()
        # Invoke with a truthy value for each flattened field.
        client.query_execution_inputs_and_outputs(
            execution="execution_value",
        )
        # Exactly one RPC; its request must carry the flattened value.
        assert len(stub.mock_calls) == 1
        sent_request = stub.mock_calls[0].args[0]
        assert sent_request.execution == "execution_value"
def test_query_execution_inputs_and_outputs_flattened_error():
    """Passing both a request object and flattened fields must raise."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client.query_execution_inputs_and_outputs(
            metadata_service.QueryExecutionInputsAndOutputsRequest(),
            execution="execution_value",
        )
@pytest.mark.asyncio
async def test_query_execution_inputs_and_outputs_flattened_async():
    """Flattened kwargs must be packed into the request (async client).

    Fix: the original assigned a synchronous response to
    ``call.return_value`` and then immediately overwrote it with the async
    fake call; the dead first assignment is removed.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_execution_inputs_and_outputs), "__call__"
    ) as call:
        # Designate the awaited return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            lineage_subgraph.LineageSubgraph()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.query_execution_inputs_and_outputs(
            execution="execution_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].execution == "execution_value"
@pytest.mark.asyncio
async def test_query_execution_inputs_and_outputs_flattened_error_async():
    """Passing both a request object and flattened fields must raise (async)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.query_execution_inputs_and_outputs(
            metadata_service.QueryExecutionInputsAndOutputsRequest(),
            execution="execution_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.CreateMetadataSchemaRequest,
        dict,
    ],
)
def test_create_metadata_schema(request_type, transport: str = "grpc"):
    """Exercise create_metadata_schema over gRPC and check all response fields."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.create_metadata_schema), "__call__"
    ) as stub:
        # Fake a fully populated server response.
        stub.return_value = gca_metadata_schema.MetadataSchema(
            name="name_value",
            schema_version="schema_version_value",
            schema="schema_value",
            schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
            description="description_value",
        )
        response = client.create_metadata_schema(request)
        # Exactly one RPC, carrying the default request.
        assert len(stub.mock_calls) == 1
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.CreateMetadataSchemaRequest()
    # Every faked field must round-trip onto the response.
    assert isinstance(response, gca_metadata_schema.MetadataSchema)
    assert response.name == "name_value"
    assert response.schema_version == "schema_version_value"
    assert response.schema == "schema_value"
    assert (
        response.schema_type
        == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
    )
    assert response.description == "description_value"
def test_create_metadata_schema_empty_call():
    """A call with no request and no flattened fields must still succeed."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.create_metadata_schema), "__call__"
    ) as stub:
        client.create_metadata_schema()
        stub.assert_called()
        # The stub must have received a default-constructed request.
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.CreateMetadataSchemaRequest()
@pytest.mark.asyncio
async def test_create_metadata_schema_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.CreateMetadataSchemaRequest,
):
    """Exercise create_metadata_schema over the async transport."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.create_metadata_schema), "__call__"
    ) as stub:
        # Fake a fully populated server response.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_metadata_schema.MetadataSchema(
                name="name_value",
                schema_version="schema_version_value",
                schema="schema_value",
                schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
                description="description_value",
            )
        )
        response = await client.create_metadata_schema(request)
        # The stub must have been invoked, carrying the default request.
        assert len(stub.mock_calls)
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.CreateMetadataSchemaRequest()
    # Every faked field must round-trip onto the response.
    assert isinstance(response, gca_metadata_schema.MetadataSchema)
    assert response.name == "name_value"
    assert response.schema_version == "schema_version_value"
    assert response.schema == "schema_value"
    assert (
        response.schema_type
        == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
    )
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_metadata_schema_async_from_dict():
    """Re-run the async test with a dict-typed request."""
    await test_create_metadata_schema_async(request_type=dict)
def test_create_metadata_schema_field_headers():
    """Fields embedded in the request URI must be sent as routing headers."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-embedded field with a non-empty value.
    request = metadata_service.CreateMetadataSchemaRequest()
    request.parent = "parent_value"
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.create_metadata_schema), "__call__"
    ) as stub:
        stub.return_value = gca_metadata_schema.MetadataSchema()
        client.create_metadata_schema(request)
        # Exactly one RPC, carrying our request.
        assert len(stub.mock_calls) == 1
        args = stub.mock_calls[0].args
        assert args[0] == request
        # The routing header must be attached to the call metadata.
        kwargs = stub.mock_calls[0].kwargs
        assert ("x-goog-request-params", "parent=parent_value") in kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_metadata_schema_field_headers_async():
    """Routing headers for URI-embedded fields must be sent (async client)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a URI-embedded field with a non-empty value.
    request = metadata_service.CreateMetadataSchemaRequest()
    request.parent = "parent_value"
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.create_metadata_schema), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_metadata_schema.MetadataSchema()
        )
        await client.create_metadata_schema(request)
        # The stub must have been invoked, carrying our request.
        assert len(stub.mock_calls)
        args = stub.mock_calls[0].args
        assert args[0] == request
        # The routing header must be attached to the call metadata.
        kwargs = stub.mock_calls[0].kwargs
        assert ("x-goog-request-params", "parent=parent_value") in kwargs["metadata"]
def test_create_metadata_schema_flattened():
    """Flattened keyword arguments must be packed into the request proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.create_metadata_schema), "__call__"
    ) as stub:
        stub.return_value = gca_metadata_schema.MetadataSchema()
        # Invoke with a truthy value for each flattened field.
        client.create_metadata_schema(
            parent="parent_value",
            metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"),
            metadata_schema_id="metadata_schema_id_value",
        )
        # Exactly one RPC; its request must carry the flattened values.
        assert len(stub.mock_calls) == 1
        sent_request = stub.mock_calls[0].args[0]
        assert sent_request.parent == "parent_value"
        assert sent_request.metadata_schema == gca_metadata_schema.MetadataSchema(
            name="name_value"
        )
        assert sent_request.metadata_schema_id == "metadata_schema_id_value"
def test_create_metadata_schema_flattened_error():
    """Passing both a request object and flattened fields must raise."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client.create_metadata_schema(
            metadata_service.CreateMetadataSchemaRequest(),
            parent="parent_value",
            metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"),
            metadata_schema_id="metadata_schema_id_value",
        )
@pytest.mark.asyncio
async def test_create_metadata_schema_flattened_async():
    """Flattened kwargs must be packed into the request (async client).

    Fix: the original assigned a synchronous response to
    ``call.return_value`` and then immediately overwrote it with the async
    fake call; the dead first assignment is removed.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_metadata_schema), "__call__"
    ) as call:
        # Designate the awaited return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_metadata_schema.MetadataSchema()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_metadata_schema(
            parent="parent_value",
            metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"),
            metadata_schema_id="metadata_schema_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].metadata_schema == gca_metadata_schema.MetadataSchema(
            name="name_value"
        )
        assert args[0].metadata_schema_id == "metadata_schema_id_value"
@pytest.mark.asyncio
async def test_create_metadata_schema_flattened_error_async():
    """Passing both a request object and flattened fields must raise (async)."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.create_metadata_schema(
            metadata_service.CreateMetadataSchemaRequest(),
            parent="parent_value",
            metadata_schema=gca_metadata_schema.MetadataSchema(name="name_value"),
            metadata_schema_id="metadata_schema_id_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.GetMetadataSchemaRequest,
        dict,
    ],
)
def test_get_metadata_schema(request_type, transport: str = "grpc"):
    """Exercise get_metadata_schema over gRPC and check all response fields."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.get_metadata_schema), "__call__"
    ) as stub:
        # Fake a fully populated server response.
        stub.return_value = metadata_schema.MetadataSchema(
            name="name_value",
            schema_version="schema_version_value",
            schema="schema_value",
            schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
            description="description_value",
        )
        response = client.get_metadata_schema(request)
        # Exactly one RPC, carrying the default request.
        assert len(stub.mock_calls) == 1
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.GetMetadataSchemaRequest()
    # Every faked field must round-trip onto the response.
    assert isinstance(response, metadata_schema.MetadataSchema)
    assert response.name == "name_value"
    assert response.schema_version == "schema_version_value"
    assert response.schema == "schema_value"
    assert (
        response.schema_type
        == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
    )
    assert response.description == "description_value"
def test_get_metadata_schema_empty_call():
    """A call with no request and no flattened fields must still succeed."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.get_metadata_schema), "__call__"
    ) as stub:
        client.get_metadata_schema()
        stub.assert_called()
        # The stub must have received a default-constructed request.
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.GetMetadataSchemaRequest()
@pytest.mark.asyncio
async def test_get_metadata_schema_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.GetMetadataSchemaRequest,
):
    """Exercise get_metadata_schema over the async transport."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request is sufficient.
    request = request_type()
    # Patch the transport-level stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.get_metadata_schema), "__call__"
    ) as stub:
        # Fake a fully populated server response.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_schema.MetadataSchema(
                name="name_value",
                schema_version="schema_version_value",
                schema="schema_value",
                schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
                description="description_value",
            )
        )
        response = await client.get_metadata_schema(request)
        # The stub must have been invoked, carrying the default request.
        assert len(stub.mock_calls)
        args = stub.mock_calls[0].args
        assert args[0] == metadata_service.GetMetadataSchemaRequest()
    # Every faked field must round-trip onto the response.
    assert isinstance(response, metadata_schema.MetadataSchema)
    assert response.name == "name_value"
    assert response.schema_version == "schema_version_value"
    assert response.schema == "schema_value"
    assert (
        response.schema_type
        == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
    )
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_metadata_schema_async_from_dict():
    """The async method also accepts the request as a plain dict."""
    await test_get_metadata_schema_async(request_type=dict)
def test_get_metadata_schema_field_headers():
    """The request's `name` field is routed as an x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.GetMetadataSchemaRequest()
    request.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_metadata_schema), "__call__"
    ) as call:
        call.return_value = metadata_schema.MetadataSchema()
        client.get_metadata_schema(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_metadata_schema_field_headers_async():
    """Async variant: the `name` field is routed as an x-goog-request-params header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.GetMetadataSchemaRequest()
    request.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_metadata_schema), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_schema.MetadataSchema()
        )
        await client.get_metadata_schema(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in kw["metadata"]
def test_get_metadata_schema_flattened():
    """Flattened keyword arguments populate the outgoing request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_metadata_schema), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = metadata_schema.MetadataSchema()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_metadata_schema(
            name="name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_metadata_schema_flattened_error():
    """Passing a request object together with flattened fields must raise."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request instance and flattened keyword arguments is
    # ambiguous, so the client is expected to reject the combination.
    request = metadata_service.GetMetadataSchemaRequest()
    with pytest.raises(ValueError):
        client.get_metadata_schema(request, name="name_value")
@pytest.mark.asyncio
async def test_get_metadata_schema_flattened_async():
    """Flattened kwargs on the async client populate the request message."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_metadata_schema), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  The generator
        # emitted a synchronous return value here that was immediately
        # overwritten; only the awaitable fake call is kept.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_schema.MetadataSchema()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_metadata_schema(
            name="name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_metadata_schema_flattened_error_async():
    """Mixing a request object with flattened fields raises on the async client."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_metadata_schema(
            metadata_service.GetMetadataSchemaRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.ListMetadataSchemasRequest,
        dict,
    ],
)
def test_list_metadata_schemas(request_type, transport: str = "grpc"):
    """ListMetadataSchemas: an empty request reaches the stub and the client
    wraps the faked response in a pager."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_schemas), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = metadata_service.ListMetadataSchemasResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_metadata_schemas(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.ListMetadataSchemasRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListMetadataSchemasPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_metadata_schemas_empty_call():
    """Calling with no arguments sends a default-constructed request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_schemas), "__call__"
    ) as call:
        client.list_metadata_schemas()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.ListMetadataSchemasRequest()
@pytest.mark.asyncio
async def test_list_metadata_schemas_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.ListMetadataSchemasRequest,
):
    """ListMetadataSchemas (async): the stub is invoked and the client returns
    an async pager over the faked response."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_schemas), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListMetadataSchemasResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_metadata_schemas(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.ListMetadataSchemasRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListMetadataSchemasAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_metadata_schemas_async_from_dict():
    """The async method also accepts the request as a plain dict."""
    await test_list_metadata_schemas_async(request_type=dict)
def test_list_metadata_schemas_field_headers():
    """The request's `parent` field is routed as an x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.ListMetadataSchemasRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_schemas), "__call__"
    ) as call:
        call.return_value = metadata_service.ListMetadataSchemasResponse()
        client.list_metadata_schemas(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_metadata_schemas_field_headers_async():
    """Async variant: the `parent` field is routed as an x-goog-request-params header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.ListMetadataSchemasRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_schemas), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListMetadataSchemasResponse()
        )
        await client.list_metadata_schemas(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
def test_list_metadata_schemas_flattened():
    """Flattened keyword arguments populate the outgoing request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_schemas), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = metadata_service.ListMetadataSchemasResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_metadata_schemas(
            parent="parent_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_metadata_schemas_flattened_error():
    """Passing a request object together with flattened fields must raise."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request instance and flattened keyword arguments is
    # ambiguous, so the client is expected to reject the combination.
    request = metadata_service.ListMetadataSchemasRequest()
    with pytest.raises(ValueError):
        client.list_metadata_schemas(request, parent="parent_value")
@pytest.mark.asyncio
async def test_list_metadata_schemas_flattened_async():
    """Flattened kwargs on the async client populate the request message."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_schemas), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  The generator
        # emitted a synchronous return value here that was immediately
        # overwritten; only the awaitable fake call is kept.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            metadata_service.ListMetadataSchemasResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_metadata_schemas(
            parent="parent_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_metadata_schemas_flattened_error_async():
    """Mixing a request object with flattened fields raises on the async client."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_metadata_schemas(
            metadata_service.ListMetadataSchemasRequest(),
            parent="parent_value",
        )
def test_list_metadata_schemas_pager(transport_name: str = "grpc"):
    """The sync pager walks every mocked page and carries routing metadata."""
    client = MetadataServiceClient(
        # Instantiate the credentials: the rest of this file passes an
        # AnonymousCredentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_schemas), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[],
                next_page_token="def",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
            ),
            RuntimeError,
        )
        # The pager should carry the routing header for the (empty) parent.
        expected_metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_metadata_schemas(request={})
        assert pager._metadata == expected_metadata
        results = list(pager)
        # 3 + 0 + 1 + 2 items across the four pages.
        assert len(results) == 6
        assert all(isinstance(i, metadata_schema.MetadataSchema) for i in results)
def test_list_metadata_schemas_pages(transport_name: str = "grpc"):
    """Iterating `.pages` yields one page per mocked response, in order."""
    client = MetadataServiceClient(
        # Instantiate the credentials: the rest of this file passes an
        # AnonymousCredentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_schemas), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[],
                next_page_token="def",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_metadata_schemas(request={}).pages)
        # The final page has no next_page_token (proto default is "").
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_metadata_schemas_async_pager():
    """The async pager yields every item across all mocked pages."""
    client = MetadataServiceAsyncClient(
        # Instantiate the credentials: the rest of this file passes an
        # AnonymousCredentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_schemas),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[],
                next_page_token="def",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_metadata_schemas(
            request={},
        )
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:  # pragma: no branch
            responses.append(response)
        # 3 + 0 + 1 + 2 items across the four pages.
        assert len(responses) == 6
        assert all(isinstance(i, metadata_schema.MetadataSchema) for i in responses)
@pytest.mark.asyncio
async def test_list_metadata_schemas_async_pages():
    """Async `.pages` iteration yields one page per mocked response, in order."""
    client = MetadataServiceAsyncClient(
        # Instantiate the credentials: the rest of this file passes an
        # AnonymousCredentials *instance*, not the class object.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_metadata_schemas),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token="abc",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[],
                next_page_token="def",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token="ghi",
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (
            await client.list_metadata_schemas(request={})
        ).pages:  # pragma: no branch
            pages.append(page_)
        # The final page has no next_page_token (proto default is "").
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type",
    [
        metadata_service.QueryArtifactLineageSubgraphRequest,
        dict,
    ],
)
def test_query_artifact_lineage_subgraph(request_type, transport: str = "grpc"):
    """QueryArtifactLineageSubgraph: the request reaches the stub and a
    LineageSubgraph is returned."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_artifact_lineage_subgraph), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = lineage_subgraph.LineageSubgraph()
        response = client.query_artifact_lineage_subgraph(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, lineage_subgraph.LineageSubgraph)
def test_query_artifact_lineage_subgraph_empty_call():
    """Calling with no arguments sends a default-constructed request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_artifact_lineage_subgraph), "__call__"
    ) as call:
        client.query_artifact_lineage_subgraph()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_async(
    transport: str = "grpc_asyncio",
    request_type=metadata_service.QueryArtifactLineageSubgraphRequest,
):
    """QueryArtifactLineageSubgraph (async): the stub is invoked and the faked
    LineageSubgraph is returned to the caller."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_artifact_lineage_subgraph), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            lineage_subgraph.LineageSubgraph()
        )
        response = await client.query_artifact_lineage_subgraph(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, lineage_subgraph.LineageSubgraph)
@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_async_from_dict():
    """The async method also accepts the request as a plain dict."""
    await test_query_artifact_lineage_subgraph_async(request_type=dict)
def test_query_artifact_lineage_subgraph_field_headers():
    """The `artifact` field is routed as an x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.QueryArtifactLineageSubgraphRequest()
    request.artifact = "artifact_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_artifact_lineage_subgraph), "__call__"
    ) as call:
        call.return_value = lineage_subgraph.LineageSubgraph()
        client.query_artifact_lineage_subgraph(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "artifact=artifact_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_field_headers_async():
    """Async variant: the `artifact` field is routed as an x-goog-request-params header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.QueryArtifactLineageSubgraphRequest()
    request.artifact = "artifact_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_artifact_lineage_subgraph), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            lineage_subgraph.LineageSubgraph()
        )
        await client.query_artifact_lineage_subgraph(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "artifact=artifact_value",
    ) in kw["metadata"]
def test_query_artifact_lineage_subgraph_flattened():
    """Flattened keyword arguments populate the outgoing request message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_artifact_lineage_subgraph), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = lineage_subgraph.LineageSubgraph()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.query_artifact_lineage_subgraph(
            artifact="artifact_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].artifact
        mock_val = "artifact_value"
        assert arg == mock_val
def test_query_artifact_lineage_subgraph_flattened_error():
    """Passing a request object together with flattened fields must raise."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request instance and flattened keyword arguments is
    # ambiguous, so the client is expected to reject the combination.
    request = metadata_service.QueryArtifactLineageSubgraphRequest()
    with pytest.raises(ValueError):
        client.query_artifact_lineage_subgraph(request, artifact="artifact_value")
@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_flattened_async():
    """Flattened kwargs on the async client populate the request message."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.query_artifact_lineage_subgraph), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  The generator
        # emitted a synchronous return value here that was immediately
        # overwritten; only the awaitable fake call is kept.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            lineage_subgraph.LineageSubgraph()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.query_artifact_lineage_subgraph(
            artifact="artifact_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].artifact
        mock_val = "artifact_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_flattened_error_async():
    """Mixing a request object with flattened fields raises on the async client."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.query_artifact_lineage_subgraph(
            metadata_service.QueryArtifactLineageSubgraphRequest(),
            artifact="artifact_value",
        )
def test_credentials_transport_error():
    """A transport instance is mutually exclusive with credentials,
    credentials_file, api_key, and scopes client options."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MetadataServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MetadataServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = MetadataServiceClient(
            client_options=options,
            transport=transport,
        )
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = MetadataServiceClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MetadataServiceClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )
def test_transport_instance():
    """A pre-built transport instance is adopted by the client as-is."""
    custom_transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The client must hold the exact transport object it was given.
    client = MetadataServiceClient(transport=custom_transport)
    assert client.transport is custom_transport
def test_transport_get_channel():
    """Both gRPC transports expose a usable grpc_channel attribute."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.MetadataServiceGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MetadataServiceGrpcTransport,
        transports.MetadataServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
    ],
)
def test_transport_kind(transport_name):
    """Each transport reports its registry name via the `kind` property."""
    transport = MetadataServiceClient.get_transport_class(transport_name)(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert transport.kind == transport_name
def test_transport_grpc_default():
    """A client built without an explicit transport defaults to gRPC."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    default_transport = client.transport
    assert isinstance(default_transport, transports.MetadataServiceGrpcTransport)
def test_metadata_service_base_transport_error():
    """Supplying credentials and a credentials_file together must raise."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.MetadataServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_metadata_service_base_transport():
    """Every RPC method and property on the abstract base transport raises
    NotImplementedError until a concrete transport overrides it."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.MetadataServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "create_metadata_store",
        "get_metadata_store",
        "list_metadata_stores",
        "delete_metadata_store",
        "create_artifact",
        "get_artifact",
        "list_artifacts",
        "update_artifact",
        "delete_artifact",
        "purge_artifacts",
        "create_context",
        "get_context",
        "list_contexts",
        "update_context",
        "delete_context",
        "purge_contexts",
        "add_context_artifacts_and_executions",
        "add_context_children",
        "remove_context_children",
        "query_context_lineage_subgraph",
        "create_execution",
        "get_execution",
        "list_executions",
        "update_execution",
        "delete_execution",
        "purge_executions",
        "add_execution_events",
        "query_execution_inputs_and_outputs",
        "create_metadata_schema",
        "get_metadata_schema",
        "list_metadata_schemas",
        "query_artifact_lineage_subgraph",
        "set_iam_policy",
        "get_iam_policy",
        "test_iam_permissions",
        "get_location",
        "list_locations",
        "get_operation",
        "wait_operation",
        "cancel_operation",
        "delete_operation",
        "list_operations",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
    # Catch all for all remaining methods and properties
    remainder = [
        "kind",
    ]
    for r in remainder:
        with pytest.raises(NotImplementedError):
            getattr(transport, r)()
def test_metadata_service_base_transport_with_credentials_file():
    """A credentials_file option is loaded with the expected scopes and
    quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.MetadataServiceTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_metadata_service_base_transport_with_adc():
    """With neither credentials nor a file, the base transport consults ADC."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.MetadataServiceTransport()
        adc.assert_called_once()
def test_metadata_service_auth_adc():
    """A bare client uses ADC with the cloud-platform default scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        MetadataServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MetadataServiceGrpcTransport,
        transports.MetadataServiceGrpcAsyncIOTransport,
    ],
)
def test_metadata_service_transport_auth_adc(transport_class):
    """Transport-level scopes and quota project are forwarded to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MetadataServiceGrpcTransport,
        transports.MetadataServiceGrpcAsyncIOTransport,
    ],
)
def test_metadata_service_transport_auth_gdch_credentials(transport_class):
    """GDCH credentials are re-scoped to api_audience, defaulting to the host."""
    host = "https://language.com"
    cases = [
        (None, host),  # no explicit audience -> host is used
        ("https://language2.com", "https://language2.com"),
    ]
    for audience, expected_audience in cases:
        with mock.patch.object(google.auth, "default", autospec=True) as adc:
            gdch_mock = mock.MagicMock()
            type(gdch_mock).with_gdch_audience = mock.PropertyMock(
                return_value=gdch_mock
            )
            adc.return_value = (gdch_mock, None)
            transport_class(host=host, api_audience=audience)
            gdch_mock.with_gdch_audience.assert_called_once_with(expected_audience)
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.MetadataServiceGrpcTransport, grpc_helpers),
        (transports.MetadataServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_metadata_service_transport_create_channel(transport_class, grpc_helpers):
    """Channel creation goes through grpc_helpers with ADC creds and defaults."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The channel must target the default host and pass through the ADC
        # credentials, user scopes, quota project, and gRPC size options.
        create_channel.assert_called_with(
            "aiplatform.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="aiplatform.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MetadataServiceGrpcTransport,
        transports.MetadataServiceGrpcAsyncIOTransport,
    ],
)
def test_metadata_service_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS precedence: explicit ssl_channel_credentials wins over a cert source."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        # The provided SSL credentials object must be passed straight through.
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            # The cert/key pair returned by the callback must be what the
            # transport feeds into grpc.ssl_channel_credentials.
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
        "grpc_asyncio",
    ],
)
def test_metadata_service_host_no_port(transport_name):
    """A portless api_endpoint gets the default :443 appended."""
    options = client_options.ClientOptions(api_endpoint="aiplatform.googleapis.com")
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
        transport=transport_name,
    )
    assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
        "grpc_asyncio",
    ],
)
def test_metadata_service_host_with_port(transport_name):
    """An api_endpoint that carries a port keeps that port untouched."""
    options = client_options.ClientOptions(api_endpoint="aiplatform.googleapis.com:8000")
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
        transport=transport_name,
    )
    assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_metadata_service_grpc_transport_channel():
    """An explicitly supplied channel is adopted verbatim by the transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.MetadataServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare to None by identity (`is None`) per PEP 8, not `== None`.
    assert transport._ssl_channel_credentials is None
def test_metadata_service_grpc_asyncio_transport_channel():
    """An explicitly supplied aio channel is adopted verbatim by the transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.MetadataServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare to None by identity (`is None`) per PEP 8, not `== None`.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MetadataServiceGrpcTransport,
        transports.MetadataServiceGrpcAsyncIOTransport,
    ],
)
def test_metadata_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint + client_cert_source still build an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # Using the deprecated arguments must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # SSL credentials are built from the callback's cert/key pair
            # (client_cert_source_callback returns b"cert bytes"/b"key bytes").
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The channel must target the mTLS endpoint with those credentials.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MetadataServiceGrpcTransport,
        transports.MetadataServiceGrpcAsyncIOTransport,
    ],
)
def test_metadata_service_transport_channel_mtls_with_adc(transport_class):
    """api_mtls_endpoint without a cert source falls back to ADC SslCredentials."""
    mock_ssl_cred = mock.Mock()
    # Stub google.auth.transport.grpc.SslCredentials so its ssl_credentials
    # property yields our sentinel without touching real ADC mTLS config.
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # The deprecated api_mtls_endpoint argument must warn.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            # The ADC-derived SSL credentials must be used for the channel.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_metadata_service_grpc_lro_client():
    """The grpc transport exposes a cached api-core operations client."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    transport = client.transport
    # Long-running operations are served by an api-core OperationsClient.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient)
    # The property is memoized: each access returns the identical object.
    assert transport.operations_client is transport.operations_client
def test_metadata_service_grpc_lro_async_client():
    """The grpc_asyncio transport exposes a cached async operations client."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    transport = client.transport
    # Long-running operations are served by an api-core OperationsAsyncClient.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient)
    # The property is memoized: each access returns the identical object.
    assert transport.operations_client is transport.operations_client
def test_artifact_path():
    """artifact_path assembles the fully-qualified artifact resource name."""
    expected = (
        "projects/squid/locations/clam/metadataStores/whelk/artifacts/octopus"
    )
    actual = MetadataServiceClient.artifact_path("squid", "clam", "whelk", "octopus")
    assert actual == expected
def test_parse_artifact_path():
    """parse_artifact_path inverts artifact_path (round-trip)."""
    fields = {
        "project": "oyster",
        "location": "nudibranch",
        "metadata_store": "cuttlefish",
        "artifact": "mussel",
    }
    path = MetadataServiceClient.artifact_path(**fields)
    assert MetadataServiceClient.parse_artifact_path(path) == fields
def test_context_path():
    """context_path assembles the fully-qualified context resource name."""
    expected = (
        "projects/winkle/locations/nautilus/metadataStores/scallop/contexts/abalone"
    )
    actual = MetadataServiceClient.context_path(
        "winkle", "nautilus", "scallop", "abalone"
    )
    assert actual == expected
def test_parse_context_path():
    """parse_context_path inverts context_path (round-trip)."""
    fields = {
        "project": "squid",
        "location": "clam",
        "metadata_store": "whelk",
        "context": "octopus",
    }
    path = MetadataServiceClient.context_path(**fields)
    assert MetadataServiceClient.parse_context_path(path) == fields
def test_execution_path():
    """execution_path assembles the fully-qualified execution resource name."""
    expected = (
        "projects/oyster/locations/nudibranch/metadataStores/cuttlefish"
        "/executions/mussel"
    )
    actual = MetadataServiceClient.execution_path(
        "oyster", "nudibranch", "cuttlefish", "mussel"
    )
    assert actual == expected
def test_parse_execution_path():
    """parse_execution_path inverts execution_path (round-trip)."""
    fields = {
        "project": "winkle",
        "location": "nautilus",
        "metadata_store": "scallop",
        "execution": "abalone",
    }
    path = MetadataServiceClient.execution_path(**fields)
    assert MetadataServiceClient.parse_execution_path(path) == fields
def test_metadata_schema_path():
    """metadata_schema_path assembles the metadataSchema resource name."""
    expected = (
        "projects/squid/locations/clam/metadataStores/whelk"
        "/metadataSchemas/octopus"
    )
    actual = MetadataServiceClient.metadata_schema_path(
        "squid", "clam", "whelk", "octopus"
    )
    assert actual == expected
def test_parse_metadata_schema_path():
    """parse_metadata_schema_path inverts metadata_schema_path (round-trip)."""
    fields = {
        "project": "oyster",
        "location": "nudibranch",
        "metadata_store": "cuttlefish",
        "metadata_schema": "mussel",
    }
    path = MetadataServiceClient.metadata_schema_path(**fields)
    assert MetadataServiceClient.parse_metadata_schema_path(path) == fields
def test_metadata_store_path():
    """metadata_store_path assembles the metadataStore resource name."""
    expected = "projects/winkle/locations/nautilus/metadataStores/scallop"
    actual = MetadataServiceClient.metadata_store_path(
        "winkle", "nautilus", "scallop"
    )
    assert actual == expected
def test_parse_metadata_store_path():
    """parse_metadata_store_path inverts metadata_store_path (round-trip)."""
    fields = {
        "project": "abalone",
        "location": "squid",
        "metadata_store": "clam",
    }
    path = MetadataServiceClient.metadata_store_path(**fields)
    assert MetadataServiceClient.parse_metadata_store_path(path) == fields
def test_common_billing_account_path():
    """common_billing_account_path assembles billingAccounts/{id}."""
    actual = MetadataServiceClient.common_billing_account_path("whelk")
    assert actual == "billingAccounts/whelk"
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts the builder (round-trip)."""
    fields = {"billing_account": "octopus"}
    path = MetadataServiceClient.common_billing_account_path(**fields)
    assert MetadataServiceClient.parse_common_billing_account_path(path) == fields
def test_common_folder_path():
    """common_folder_path assembles folders/{folder}."""
    actual = MetadataServiceClient.common_folder_path("oyster")
    assert actual == "folders/oyster"
def test_parse_common_folder_path():
    """parse_common_folder_path inverts the builder (round-trip)."""
    fields = {"folder": "nudibranch"}
    path = MetadataServiceClient.common_folder_path(**fields)
    assert MetadataServiceClient.parse_common_folder_path(path) == fields
def test_common_organization_path():
    """common_organization_path assembles organizations/{org}."""
    actual = MetadataServiceClient.common_organization_path("cuttlefish")
    assert actual == "organizations/cuttlefish"
def test_parse_common_organization_path():
    """parse_common_organization_path inverts the builder (round-trip)."""
    fields = {"organization": "mussel"}
    path = MetadataServiceClient.common_organization_path(**fields)
    assert MetadataServiceClient.parse_common_organization_path(path) == fields
def test_common_project_path():
    """common_project_path assembles projects/{project}."""
    actual = MetadataServiceClient.common_project_path("winkle")
    assert actual == "projects/winkle"
def test_parse_common_project_path():
    """parse_common_project_path inverts the builder (round-trip)."""
    fields = {"project": "nautilus"}
    path = MetadataServiceClient.common_project_path(**fields)
    assert MetadataServiceClient.parse_common_project_path(path) == fields
def test_common_location_path():
    """common_location_path assembles projects/{project}/locations/{location}."""
    actual = MetadataServiceClient.common_location_path("scallop", "abalone")
    assert actual == "projects/scallop/locations/abalone"
def test_parse_common_location_path():
    """parse_common_location_path inverts the builder (round-trip)."""
    fields = {
        "project": "squid",
        "location": "clam",
    }
    path = MetadataServiceClient.common_location_path(**fields)
    assert MetadataServiceClient.parse_common_location_path(path) == fields
def test_client_with_default_client_info():
    """client_info reaches _prep_wrapped_messages via client and transport paths."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Path 1: constructing the client.
    with mock.patch.object(
        transports.MetadataServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        MetadataServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # Path 2: constructing the transport directly.
    with mock.patch.object(
        transports.MetadataServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = MetadataServiceClient.get_transport_class()
        transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client context closes the underlying gRPC channel."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    channel_type = type(client.transport.grpc_channel)
    with mock.patch.object(channel_type, "close") as close:
        async with client:
            # The channel stays open for the lifetime of the context.
            close.assert_not_called()
        close.assert_called_once()
def test_delete_operation(transport: str = "grpc"):
    """delete_operation hits the gRPC stub exactly once and returns None."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request suffices.
    request = operations_pb2.DeleteOperationRequest()
    with mock.patch.object(type(client.transport.delete_operation), "__call__") as rpc:
        rpc.return_value = None
        response = client.delete_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request
        # Deleting an operation yields no payload.
        assert response is None
@pytest.mark.asyncio
async def test_delete_operation_async(transport: str = "grpc"):
    """Async delete_operation hits the stub once and resolves to None."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request suffices.
    request = operations_pb2.DeleteOperationRequest()
    with mock.patch.object(type(client.transport.delete_operation), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request
        # Deleting an operation yields no payload.
        assert response is None
def test_delete_operation_field_headers():
    """The request name is routed as an x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI must travel as a field
    # header, so give `name` a non-empty value.
    request = operations_pb2.DeleteOperationRequest()
    request.name = "locations"
    with mock.patch.object(type(client.transport.delete_operation), "__call__") as rpc:
        rpc.return_value = None
        client.delete_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, keyword = rpc.mock_calls[0]
        assert positional[0] == request
        # The routing header derived from the request must be present.
        assert ("x-goog-request-params", "name=locations") in keyword["metadata"]
@pytest.mark.asyncio
async def test_delete_operation_field_headers_async():
    """Async variant: the request name is routed as a request-params header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI must travel as a field
    # header, so give `name` a non-empty value.
    request = operations_pb2.DeleteOperationRequest()
    request.name = "locations"
    with mock.patch.object(type(client.transport.delete_operation), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, keyword = rpc.mock_calls[0]
        assert positional[0] == request
        # The routing header derived from the request must be present.
        assert ("x-goog-request-params", "name=locations") in keyword["metadata"]
def test_delete_operation_from_dict():
    """A plain dict is accepted in place of a DeleteOperationRequest proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.delete_operation), "__call__") as rpc:
        rpc.return_value = None
        client.delete_operation(request={"name": "locations"})
        rpc.assert_called()
@pytest.mark.asyncio
async def test_delete_operation_from_dict_async():
    """Async variant: a plain dict is accepted in place of a proto request."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.delete_operation), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_operation(request={"name": "locations"})
        rpc.assert_called()
def test_cancel_operation(transport: str = "grpc"):
    """cancel_operation hits the gRPC stub exactly once and returns None."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request suffices.
    request = operations_pb2.CancelOperationRequest()
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as rpc:
        rpc.return_value = None
        response = client.cancel_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request
        # Cancelling an operation yields no payload.
        assert response is None
@pytest.mark.asyncio
async def test_cancel_operation_async(transport: str = "grpc"):
    """Async cancel_operation hits the stub once and resolves to None."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request suffices.
    request = operations_pb2.CancelOperationRequest()
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.cancel_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request
        # Cancelling an operation yields no payload.
        assert response is None
def test_cancel_operation_field_headers():
    """The request name is routed as an x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI must travel as a field
    # header, so give `name` a non-empty value.
    request = operations_pb2.CancelOperationRequest()
    request.name = "locations"
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as rpc:
        rpc.return_value = None
        client.cancel_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, keyword = rpc.mock_calls[0]
        assert positional[0] == request
        # The routing header derived from the request must be present.
        assert ("x-goog-request-params", "name=locations") in keyword["metadata"]
@pytest.mark.asyncio
async def test_cancel_operation_field_headers_async():
    """Async variant: the request name is routed as a request-params header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI must travel as a field
    # header, so give `name` a non-empty value.
    request = operations_pb2.CancelOperationRequest()
    request.name = "locations"
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.cancel_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, keyword = rpc.mock_calls[0]
        assert positional[0] == request
        # The routing header derived from the request must be present.
        assert ("x-goog-request-params", "name=locations") in keyword["metadata"]
def test_cancel_operation_from_dict():
    """A plain dict is accepted in place of a CancelOperationRequest proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as rpc:
        rpc.return_value = None
        client.cancel_operation(request={"name": "locations"})
        rpc.assert_called()
@pytest.mark.asyncio
async def test_cancel_operation_from_dict_async():
    """Async variant: a plain dict is accepted in place of a proto request."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.cancel_operation(request={"name": "locations"})
        rpc.assert_called()
def test_wait_operation(transport: str = "grpc"):
    """wait_operation hits the stub once and returns an Operation message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request suffices.
    request = operations_pb2.WaitOperationRequest()
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation()
        response = client.wait_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request
        # Waiting resolves to an Operation message.
        assert isinstance(response, operations_pb2.Operation)
@pytest.mark.asyncio
async def test_wait_operation_async(transport: str = "grpc"):
    """Async wait_operation hits the stub once and resolves to an Operation.

    Fix: this test was named ``test_wait_operation``, duplicating the
    synchronous test defined immediately above it. The later definition
    shadowed the earlier one, so pytest never collected the sync variant.
    Renamed with the ``_async`` suffix to match every sibling pair
    (e.g. ``test_delete_operation`` / ``test_delete_operation_async``).
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = operations_pb2.WaitOperationRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        response = await client.wait_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the response is the type that we expect.
        assert isinstance(response, operations_pb2.Operation)
def test_wait_operation_field_headers():
    """The request name is routed as an x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI must travel as a field
    # header, so give `name` a non-empty value.
    request = operations_pb2.WaitOperationRequest()
    request.name = "locations"
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation()
        client.wait_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, keyword = rpc.mock_calls[0]
        assert positional[0] == request
        # The routing header derived from the request must be present.
        assert ("x-goog-request-params", "name=locations") in keyword["metadata"]
@pytest.mark.asyncio
async def test_wait_operation_field_headers_async():
    """Async variant: the request name is routed as a request-params header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI must travel as a field
    # header, so give `name` a non-empty value.
    request = operations_pb2.WaitOperationRequest()
    request.name = "locations"
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        await client.wait_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, keyword = rpc.mock_calls[0]
        assert positional[0] == request
        # The routing header derived from the request must be present.
        assert ("x-goog-request-params", "name=locations") in keyword["metadata"]
def test_wait_operation_from_dict():
    """A plain dict is accepted in place of a WaitOperationRequest proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation()
        client.wait_operation(request={"name": "locations"})
        rpc.assert_called()
@pytest.mark.asyncio
async def test_wait_operation_from_dict_async():
    """Async variant: a plain dict is accepted in place of a proto request."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        await client.wait_operation(request={"name": "locations"})
        rpc.assert_called()
def test_get_operation(transport: str = "grpc"):
    """get_operation hits the stub once and returns an Operation message."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request suffices.
    request = operations_pb2.GetOperationRequest()
    with mock.patch.object(type(client.transport.get_operation), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation()
        response = client.get_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request
        # Lookup resolves to an Operation message.
        assert isinstance(response, operations_pb2.Operation)
@pytest.mark.asyncio
async def test_get_operation_async(transport: str = "grpc"):
    """Async get_operation hits the stub once and resolves to an Operation."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request suffices.
    request = operations_pb2.GetOperationRequest()
    with mock.patch.object(type(client.transport.get_operation), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        response = await client.get_operation(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request
        # Lookup resolves to an Operation message.
        assert isinstance(response, operations_pb2.Operation)
def test_get_operation_field_headers():
    """Verify get_operation sends the request ``name`` in the x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.GetOperationRequest()
    request.name = "locations"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
        call.return_value = operations_pb2.Operation()
        client.get_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=locations",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_operation_field_headers_async():
    """Verify the async get_operation sends the request ``name`` in the routing header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.GetOperationRequest()
    request.name = "locations"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        await client.get_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=locations",
    ) in kw["metadata"]
def test_get_operation_from_dict():
    """Verify get_operation accepts a plain dict in place of a request proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Intercept the gRPC stub so no real RPC is attempted.
    with mock.patch.object(type(client.transport.get_operation), "__call__") as stubbed:
        stubbed.return_value = operations_pb2.Operation()
        client.get_operation(request={"name": "locations"})
        stubbed.assert_called()
@pytest.mark.asyncio
async def test_get_operation_from_dict_async():
    """Verify the async get_operation accepts a plain dict in place of a request proto."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        response = await client.get_operation(
            request={
                "name": "locations",
            }
        )
        call.assert_called()
def test_list_operations(transport: str = "grpc"):
    """Verify list_operations forwards the request to the gRPC stub and returns the response."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = operations_pb2.ListOperationsRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.ListOperationsResponse()
        response = client.list_operations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, operations_pb2.ListOperationsResponse)
@pytest.mark.asyncio
async def test_list_operations_async(transport: str = "grpc_asyncio"):
    """Verify the async list_operations forwards the request to the gRPC stub.

    Default transport is "grpc_asyncio" to match the async client,
    consistent with the other ``*_async`` tests in this file.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = operations_pb2.ListOperationsRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.ListOperationsResponse()
        )
        response = await client.list_operations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, operations_pb2.ListOperationsResponse)
def test_list_operations_field_headers():
    """Verify list_operations sends the request ``name`` in the x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.ListOperationsRequest()
    request.name = "locations"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
        call.return_value = operations_pb2.ListOperationsResponse()
        client.list_operations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=locations",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_operations_field_headers_async():
    """Verify the async list_operations sends the request ``name`` in the routing header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.ListOperationsRequest()
    request.name = "locations"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.ListOperationsResponse()
        )
        await client.list_operations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=locations",
    ) in kw["metadata"]
def test_list_operations_from_dict():
    """Verify list_operations accepts a plain dict in place of a request proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Intercept the gRPC stub so no real RPC is attempted.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as stubbed:
        stubbed.return_value = operations_pb2.ListOperationsResponse()
        client.list_operations(request={"name": "locations"})
        stubbed.assert_called()
@pytest.mark.asyncio
async def test_list_operations_from_dict_async():
    """Verify the async list_operations accepts a plain dict in place of a request proto."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.ListOperationsResponse()
        )
        response = await client.list_operations(
            request={
                "name": "locations",
            }
        )
        call.assert_called()
def test_list_locations(transport: str = "grpc"):
    """Verify list_locations forwards the request to the gRPC stub and returns the response."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = locations_pb2.ListLocationsRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = locations_pb2.ListLocationsResponse()
        response = client.list_locations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, locations_pb2.ListLocationsResponse)
@pytest.mark.asyncio
async def test_list_locations_async(transport: str = "grpc_asyncio"):
    """Verify the async list_locations forwards the request to the gRPC stub.

    Default transport is "grpc_asyncio" to match the async client,
    consistent with the other ``*_async`` tests in this file.
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = locations_pb2.ListLocationsRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.ListLocationsResponse()
        )
        response = await client.list_locations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, locations_pb2.ListLocationsResponse)
def test_list_locations_field_headers():
    """Verify list_locations sends the request ``name`` in the x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = locations_pb2.ListLocationsRequest()
    request.name = "locations"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        call.return_value = locations_pb2.ListLocationsResponse()
        client.list_locations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=locations",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_locations_field_headers_async():
    """Verify the async list_locations sends the request ``name`` in the routing header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = locations_pb2.ListLocationsRequest()
    request.name = "locations"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.ListLocationsResponse()
        )
        await client.list_locations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=locations",
    ) in kw["metadata"]
def test_list_locations_from_dict():
    """Verify list_locations accepts a plain dict in place of a request proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = locations_pb2.ListLocationsResponse()
        response = client.list_locations(
            request={
                "name": "locations",
            }
        )
        call.assert_called()
@pytest.mark.asyncio
async def test_list_locations_from_dict_async():
    """Verify the async list_locations accepts a plain dict in place of a request proto."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.ListLocationsResponse()
        )
        response = await client.list_locations(
            request={
                "name": "locations",
            }
        )
        call.assert_called()
def test_get_location(transport: str = "grpc"):
    """Verify get_location forwards the request to the gRPC stub and returns a Location."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = locations_pb2.GetLocationRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = locations_pb2.Location()
        response = client.get_location(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, locations_pb2.Location)
@pytest.mark.asyncio
async def test_get_location_async(transport: str = "grpc_asyncio"):
    """Verify the async get_location forwards the request to the gRPC stub."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = locations_pb2.GetLocationRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.Location()
        )
        response = await client.get_location(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, locations_pb2.Location)
def test_get_location_field_headers():
    """Verify get_location sends the request ``name`` in the x-goog-request-params header."""
    client = MetadataServiceClient(credentials=ga_credentials.AnonymousCredentials())
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = locations_pb2.GetLocationRequest()
    request.name = "locations/abc"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        call.return_value = locations_pb2.Location()
        client.get_location(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=locations/abc",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_location_field_headers_async():
    """Verify the async get_location sends the request ``name`` in the routing header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials()
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = locations_pb2.GetLocationRequest()
    request.name = "locations/abc"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.Location()
        )
        await client.get_location(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=locations/abc",
    ) in kw["metadata"]
def test_get_location_from_dict():
    """Verify get_location accepts a plain dict in place of a request proto.

    Fixed: the test previously patched ``list_locations``, so the
    ``get_location`` stub under test was never mocked and
    ``call.assert_called()`` could not observe the RPC being exercised.
    """
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = locations_pb2.Location()
        response = client.get_location(
            request={
                "name": "locations/abc",
            }
        )
        call.assert_called()
@pytest.mark.asyncio
async def test_get_location_from_dict_async():
    """Verify the async get_location accepts a plain dict in place of a request proto.

    Fixed: the test previously patched ``list_locations``, so the
    ``get_location`` stub under test was never mocked; the request name is
    also aligned with the sync sibling ("locations/abc").
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.Location()
        )
        response = await client.get_location(
            request={
                "name": "locations/abc",
            }
        )
        call.assert_called()
def test_set_iam_policy(transport: str = "grpc"):
    """Verify set_iam_policy forwards the request and propagates the returned Policy fields."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = iam_policy_pb2.SetIamPolicyRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(
            version=774,
            etag=b"etag_blob",
        )
        response = client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
    """Verify the async set_iam_policy forwards the request and propagates the Policy fields."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = iam_policy_pb2.SetIamPolicyRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(
                version=774,
                etag=b"etag_blob",
            )
        )
        response = await client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_set_iam_policy_field_headers():
    """Verify set_iam_policy sends the request ``resource`` in the x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "resource=resource/value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
    """Verify the async set_iam_policy sends the request ``resource`` in the routing header."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "resource=resource/value",
    ) in kw["metadata"]
def test_set_iam_policy_from_dict():
    """Verify set_iam_policy accepts a plain dict in place of a request proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        response = client.set_iam_policy(
            request={
                "resource": "resource_value",
                "policy": policy_pb2.Policy(version=774),
            }
        )
        call.assert_called()
@pytest.mark.asyncio
async def test_set_iam_policy_from_dict_async():
    """Verify the async set_iam_policy accepts a plain dict in place of a request proto."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        response = await client.set_iam_policy(
            request={
                "resource": "resource_value",
                "policy": policy_pb2.Policy(version=774),
            }
        )
        call.assert_called()
def test_get_iam_policy(transport: str = "grpc"):
    """Verify get_iam_policy forwards the request and propagates the returned Policy fields."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = iam_policy_pb2.GetIamPolicyRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(
            version=774,
            etag=b"etag_blob",
        )
        response = client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
    """Verify the async get_iam_policy forwards the request and propagates the Policy fields.

    The stub-call count assertion is tightened to ``== 1`` for consistency
    with the sibling tests (``len(...)`` alone only checks truthiness).
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = iam_policy_pb2.GetIamPolicyRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(
                version=774,
                etag=b"etag_blob",
            )
        )
        response = await client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called exactly once.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_get_iam_policy_field_headers():
    """Verify get_iam_policy sends the request ``resource`` in the x-goog-request-params header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "resource=resource/value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
    """Verify the async get_iam_policy sends the request ``resource`` in the routing header.

    The stub-call count assertion is tightened to ``== 1`` for consistency
    with the sibling tests (``len(...)`` alone only checks truthiness).
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called exactly once.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "resource=resource/value",
    ) in kw["metadata"]
def test_get_iam_policy_from_dict():
    """Verify get_iam_policy accepts a plain dict in place of a request proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        response = client.get_iam_policy(
            request={
                "resource": "resource_value",
                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
            }
        )
        call.assert_called()
@pytest.mark.asyncio
async def test_get_iam_policy_from_dict_async():
    """Verify the async get_iam_policy accepts a plain dict in place of a request proto."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        response = await client.get_iam_policy(
            request={
                "resource": "resource_value",
                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
            }
        )
        call.assert_called()
def test_test_iam_permissions(transport: str = "grpc"):
    """Verify test_iam_permissions forwards the request and propagates the permissions list."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
            permissions=["permissions_value"],
        )
        response = client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
    """Verify the async test_iam_permissions forwards the request and propagates permissions.

    The stub-call count assertion is tightened to ``== 1`` for consistency
    with the sibling tests (``len(...)`` alone only checks truthiness).
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse(
                permissions=["permissions_value"],
            )
        )
        response = await client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called exactly once.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_field_headers():
    """Verify test_iam_permissions sends the request ``resource`` in the routing header."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "resource=resource/value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
    """Verify the async test_iam_permissions sends the request ``resource`` in the routing header.

    The stub-call count assertion is tightened to ``== 1`` for consistency
    with the sibling tests (``len(...)`` alone only checks truthiness).
    """
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        await client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called exactly once.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "resource=resource/value",
    ) in kw["metadata"]
def test_test_iam_permissions_from_dict():
    """Verify test_iam_permissions accepts a plain dict in place of a request proto."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        response = client.test_iam_permissions(
            request={
                "resource": "resource_value",
                "permissions": ["permissions_value"],
            }
        )
        call.assert_called()
@pytest.mark.asyncio
async def test_test_iam_permissions_from_dict_async():
    """Verify the async test_iam_permissions accepts a plain dict in place of a request proto."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        response = await client.test_iam_permissions(
            request={
                "resource": "resource_value",
                "permissions": ["permissions_value"],
            }
        )
        call.assert_called()
def test_transport_close():
    """Closing the client closes the underlying channel of each transport."""
    for transport_name, channel_attr in {"grpc": "_grpc_channel"}.items():
        client = MetadataServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Spy on close() of the transport's channel object.
        with mock.patch.object(
            type(getattr(client.transport, channel_attr)), "close"
        ) as mocked_close:
            with client:
                mocked_close.assert_not_called()
            mocked_close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport."""
    for transport_name in ("grpc",):
        client = MetadataServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Verify close() is only triggered when the `with` block exits.
        with mock.patch.object(type(client.transport), "close") as mocked_close:
            mocked_close.assert_not_called()
            with client:
                pass
            mocked_close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (MetadataServiceClient, transports.MetadataServiceGrpcTransport),
        (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key in client options is turned into transport credentials."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        fake_credentials = mock.Mock()
        get_api_key_credentials.return_value = fake_credentials
        client_opts = client_options.ClientOptions()
        client_opts.api_key = "api_key"
        # Intercept the transport constructor to inspect what it receives.
        with mock.patch.object(transport_class, "__init__") as transport_init:
            transport_init.return_value = None
            client = client_class(client_options=client_opts)
            transport_init.assert_called_once_with(
                credentials=fake_credentials,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
| {
"content_hash": "a9d235ff23f7bec2afcb64dff4664a3c",
"timestamp": "",
"source": "github",
"line_count": 12079,
"max_line_length": 130,
"avg_line_length": 36.2813974666777,
"alnum_prop": 0.6502282979990553,
"repo_name": "googleapis/python-aiplatform",
"id": "c82d476bb5a8c197a3432d3770a693dc30277b1f",
"size": "438843",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/gapic/aiplatform_v1/test_metadata_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
from functools import lru_cache
import random
import re
from sqlalchemy import Column, String
from seabird.plugin import Plugin, CommandMixin
from .db import Base, DatabaseMixin
class Bleep(Base):
    """ORM row mapping a banned word to its suggested replacement."""
    __tablename__ = "bleep"
    # The word being bleeped; one row per word.
    bad_word = Column(String, primary_key=True)
    # Substitute suggested to the offending user.
    replacement = Column(String)
class BleepPlugin(Plugin, CommandMixin, DatabaseMixin):
    """Watches channel messages for banned words and suggests replacements.

    Banned words and their replacements live in the ``bleep`` table and are
    managed with the ``bleep`` command.
    """

    __disabled__ = True
    REPLIES = [
        'Hey, watch your mouth! Say "{}" instead.',
        'Pottymouth! We say "{}" in this channel.',
        'Uh oh, you should really say "{}" instead.',
        'Time to put a quarter in the jar! You should really use "{}" instead.',
    ]
    # Per-instance lazy cache of Bleep rows. An instance attribute replaces
    # the previous @lru_cache(maxsize=1) on the method: caching on a bound
    # method keys on `self`, keeps the plugin instance alive for the cache's
    # lifetime, and with maxsize=1 a second instance evicts the first's
    # entry (ruff B019).
    _bleep_cache = None

    def _get_bleeps(self):
        """Return the list of Bleep rows, reading the DB only once.

        @return [Bleep] List of bleeps
        """
        if self._bleep_cache is None:
            with self.db.session() as session:
                bleeps = session.query(Bleep).all()
                # Detach objects so they remain usable outside the scope
                # of the with block.
                session.expunge_all()
            self._bleep_cache = bleeps
        return self._bleep_cache

    def cmd_bleep(self, msg):
        """
        Begin to bleep `bad_word` with `replacement`.
        `bad_word` is args[0]
        `replacement` is args[1]
        """
        args = msg.trailing.lower().strip().split(" ")
        if len(args) < 2:
            self.bot.reply(msg, "Must supply a bad word and a replacement")
            return
        bad_word, replacement = args[0], args[1]
        with self.db.session() as session:
            bleep, _ = session.get_or_create(Bleep, bad_word=bad_word)
            bleep.replacement = replacement
            session.add(bleep)
        # Invalidate the cache so the next lookup re-reads the new value.
        self._bleep_cache = None
        self.bot.reply(
            msg, "Will now bleep out {} with {}".format(bad_word, replacement)
        )

    def irc_privmsg(self, msg):  # pylint: disable=arguments-differ
        super().irc_privmsg(msg)
        if not msg.from_channel:
            return
        trailing = msg.trailing.lower().strip()
        # Don't bleep the command that manages the bleep list itself.
        if trailing.startswith("{}bleep".format(self.bot.config["PREFIX"])):
            return
        words = trailing.split(" ")
        for bleep in self._get_bleeps():
            # re.escape: bad_word is user-supplied DB text; unescaped, a word
            # containing regex metacharacters would break or widen the match.
            regex = re.compile(r"\b{}\b".format(re.escape(bleep.bad_word)))
            for word in words:
                if regex.match(word):
                    reply = random.choice(self.REPLIES)
                    self.bot.mention_reply(msg, reply.format(bleep.replacement))
| {
"content_hash": "a0da4be13619996899aa21d5da479275",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 80,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.5753846153846154,
"repo_name": "belak/pyseabird",
"id": "d81d3c48035be2c95f0fb7c77af616344fa74e1e",
"size": "2600",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "seabird/modules/bleep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "41493"
}
],
"symlink_target": ""
} |
import telnetlib
import time
from getpass import getpass
import socket
import sys
# IP address of the lab router to connect to.
pynet_rtr1 = u'184.105.247.70'
# Command(s) to run on the remote device (tuple keeps it immutable).
COMMAND = ('show ip interface brief',)
# Default telnet port and connection timeout in seconds.
TELNET_PORT = 23
TELNET_TIMEOUT = 6
class TN(object):
def __init__(self, ip_addr, TELNET_PORT=23, TELNET_TIMEOUT=6):
'''' constructor to initialise variables'''
self.TELNET_PORT = TELNET_PORT
self.TELNET_TIMEOUT = TELNET_TIMEOUT
self.ip_addr = ip_addr
def __enter__(self):
try:
self.conn = telnetlib.Telnet(self.ip_addr, self.TELNET_PORT, self.TELNET_TIMEOUT)
return self
except socket.timeout:
sys.exit("Connection timed-out")
def login(self, username, password):
output = self.conn.expect([r'[uU]sername:', r'[lL]ogin:'], TELNET_TIMEOUT)
print output
output = output[2]
self.conn.write(username + '\n')
if password:
output += self.conn.read_until('ssword:', TELNET_TIMEOUT)
self.conn.write(password + '\n')
output += self.conn.read_very_eager() #read eargely and display login process.
return output
def send_command(self, cmd):
'''This function will send a command over telnet session and returns a string.'''
cmd = cmd.strip()
self.conn.write(cmd + '\n')
time.sleep(1)
return self.conn.read_very_eager()
def __exit__(self, ip_addr, TELNET_PORT=23, TELNET_TIMEOUT=6):
self.conn.close()
def __del__(self):
self.conn.close()
if __name__ == "__main__":
    # Interactive demo: prompt for credentials, log in, disable paging and
    # print the output of the show command. Note telnet sends everything,
    # including the password, in clear text.
    print "Telnet is not secure, but we will still going to use it"
    username = raw_input("Enter username : ")
    password = getpass()
    with TN(pynet_rtr1) as remote_conn:
        print remote_conn.login(username, password)
        time.sleep(1)
        remote_conn.send_command('term len 0')  # disable paging
        print remote_conn.send_command(COMMAND[0])
        time.sleep(6)
| {
"content_hash": "3df2b967a7b5f24fad434ce88029e6f0",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 93,
"avg_line_length": 31.483870967741936,
"alnum_prop": 0.6111680327868853,
"repo_name": "krunal7558/week2",
"id": "538e038f11aa344a24e9aca40c9d7aaeb19f83e0",
"size": "1975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "class2-telnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5608"
}
],
"symlink_target": ""
} |
"""
===================
Gtf_subset pipeline
===================
Overview
========
This pipeline generates a number of annotations that can be used with
downstream CGAT pipelines. The user will download a GTF from ENSEMBL
and then the GTF is parsed and filtered. In addition to downloading an
ensembl GTF the user will need to download an assembly report for their
specific genome and add it to the directory the pipeline is ran.
Common to all of the annotations generated in this pipeline is that they
are genomic - i.e. they are genomic intervals or relate to genomic intervals.
Thus, annotations are tied to a particular version of the genome. This is
parameterised within the pipeline.ini configuration file. The pipeline
follows two principal releases: the UCSC_ genome assembly and an ENSEMBL_
geneset version.
Note: This pipeline replaces pipeline_annotations which has now been moved
to the obsolete folder.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
Principal targets
-----------------
lite
This will run annotations that are used for our common upstream
pipelines. i.e. pipeline_readqc.py, pipeline_mapping.py and
pipeline_bamstats.py.
full
This will run the entire pipeline
Configuration
-------------
The :file:`pipeline.ini` needs to be edited so that it points to the
appropriate locations of the auxiliary files.
On top of the default CGAT setup, the pipeline requires the following
software to be in the path:
Input
-----
This pipeline requires a Ensembl GTF, mirBase GFF3 file and an assembly report.
Ensembl GTF:
This can be downloaded from http://www.ensembl.org/info/data/ftp/index.html
Note: CGAT pipelines use the UCSC GTF convention (chr naming of contigs)
and therefore the GTF is sanitized to the UCSC convention. As part of this
process an ncbi assembly report needs to be specified (see below).
Assembly report:
This is downloaded from the ncbi assembly page for your specific genome.
Using hg19 as an example:
Navigate to www......
From the database tab select assembly and add your genome into the
search bar i.e. hg19.
Then click the link "Download the full sequence report"
Add it to the folder where the pipeline will be ran, the file is
for hg38 is called "GRCh38.p10_assembly_report.txt".
miRbase GFF3:
This can be downloaded from miRbase http://www.mirbase.org/ftp.shtml.
annotation file needs to be specified in the pipeline.ini
configuration file. Make sure that the genome build version of the GFF3
annotation file matches the ENSEMBL genome.
Running
-------
To run the pipeline, perform the following:
To run a basic set of annotations for the use of our common upstream pipelines
such as pipeline_readqc.py, pipeline_mapping.py and pipeline_bamstats.py you
can run the task `lite` as follows
::
python /path/to/directory/pipeline_genesets.py make lite -v 5
To run the full set of annotations produced in this pipeline (which will be
used for some our our downstream pipelines such as pipeline_intervals.py)
you can run the `full` task:
::
python /path/to/directory/pipeline_genesets.py make full -v 5
The pipeline can be run as any other CGAT pipeline, but as its purpose
is to provide a set of annotation that can be used by other pipelines
therefore there is an etiquette to be followed:
Using the pipeline results
--------------------------
The gtf_subset pipeline provides an interface for presenting its
results to other pipelines. The interface is defined in the file
:file:`pipeline.ini`. For example::
[interface]
# fasta file with cdna sequences
cdna_fasta=ensembl.dir/cdna.fasta
The ini file of pipeline annotations can be loaded into the parameter
dictionary of your own pipeline::
PARAMS.update(P.peekParameters(
PARAMS["annotations_dir"],
"pipeline_genesets.py",
prefix="annotations_"),
update_interface=True)
Parameters from the gtf_subset pipeline are now accessible via the
``annotations_`` prefix. As a result, the file
:file:`ensembl.dir/cdna.fasta` can be accessed as::
PARAMS['annotations_cdna_fasta']
Working with non-ENSEMBL species
--------------------------------
:doc:`pipeline_gtf_subset` is very much wedded to annotations in ENSEMBL-
and UCSC_. Using a non-ENSEMBL species or non-UCSC species is possible by
building ENSEMBL- or UCSC-like input files. Even so, annotations that are
downloaded from the ENSEMBL or UCSC database will not be built. You will
thus need to ask if it is worth the effort.
As other pipelines will depend on the annotations in this pipeline it is
necessary to set up a :doc:`pipeline_gtf_subset` stub. To do so, simply
build the config files by running::
python <SRC>pipeline_annotations.py config
and create the files that are being used in the downstream pipeline
explicitely (for example, for protein coding genes)::
mkdir ensembl.dir
cp <MYDATADIR>/my_gtf_geneset.gtf.gz ensembl.dir/geneset_coding.gtf.gz
Pipeline output
===============
The results of the computation are all stored in an sqlite relational
database file csvdb or as compressed files in genomic formats in the pipeline
directories. Output files are grouped by sections listed below.
The sections correspond to primary targets in the pipeline, i.e., to
build all annotations in the section ``assembly`` type::
python <SRC>pipeline_annotations.py make assembly
Section: assembly
-----------------
Annotations derived from the genome assembly. Results are
in :file:`assembly.dir`.
contigs.tsv
A :term:`tsv` formatted table with contig sizes
contigs.bed.gz
bed file with contig sizes
section: ensembl
----------------
geneset_all.gtf.gz
The full gene set after reconciling with assembly. Chromosomes names are
renamed to be consistent with the assembly and some chromosomes
are optionally removed. This file is the starting point for
all annotations derived from the ENSEMBL geneset.
geneset_cds.gtf.gz
A :term:`gtf` formatted file with only the CDS parts of transcripts.
This set will naturally include only coding transcripts. UTR regions
have been removed.
geneset_exons.gtf.gz
A :term:`gtf` formatted file with only the exon parts of transcripts.
This set includes both coding and non-coding transcripts. Coding
transcripts span both the UTR and the CDS.
geneset_coding_exons.gtf.gz
:term:`gtf` file with exon parts of protein coding transcripts.
All other features are removed. These are all features annotated
as "protein_coding" in the ENSEMBL gtf file.
geneset_noncoding_exons.gtf.gz
:term:`gtf` file with exon parts of non-coding transcripts
all other features are removed. These are all transcripts not
annotated as "protein_coding" in the ENSEMBL gtf file.
geneset_lincrna_exons.gtf.gz
:term:`gtf` file with exon parts of lincRNA transcripts. These
are transcripts annotated as "lincRNA" in the ENSEMBL gtf file.
geneset_flat.gtf.gz
A :term:`gtf` formatted file of flattened gene
models. All overlapping transcripts have been merged. This set
includes both coding and non-coding transcripts.
geneset_introns.gtf.gz
A :term:`gtf` formatted file containing all intron features. All
protein coding genes are retained and their exonic sequences are
removed to retain introns from nested genes that may overlap.
section: mirbase
----------------
miRNA_non_primary_transcripts.gff3.gz
A :term:`gff3` formatted file containing all of the non primary miRNA
transcripts from mirbase
miRNA_primary_transcripts.gff3.gz
   A :term:`GFF3` formatted file containing all of the primary miRNA
transcripts from miRbase.
section: ucsc
-------------
repeats.gff.gz
:term:`gff` formatted file with structural/complex repeats
rna.gff.gz
:term:`gff` formatted file with ribosomal rna annotations
section: geneset
----------------
Annotations derived from the ENSEMBL gene set. Annotations in
this section have been computed from the ENSEMBL gene set.
Results are in the directory :file:`geneset.dir`.
ref_flat.txt
This creates a flat reference file from geneset_flat.gtf.gz
for use in picard tools RNAseqmetrics.
section: bed
------------
This directory contains bed files that are generated from other annotations
in this pipeline.
genomic_context.bed.gz
bed-formatted file with genomic context
Section: enrichment
-------------------
This section contains useful files for genomic enrichment analysis
a la gat_. The annotations are derived from other annotations in
this pipeline. Output files are in the directory :file:`enrichment.dir`.
annotation_gff.gz
A :term:`gff` formatted file annotating the genome with respect
to the geneset. Annotations are non-overlapping and are based
only on protein coding transcripts.
genestructure.gff.gz
   A :term:`gff` file annotating genomic regions by gene structure
territories.gff.gz
gff file with gene territories, .i.e. regions around protein
coding genes. Intergenic space between genes is split at the
midpoint between two genes.
tssterritories.gff.gz
gff file with tss territories
greatdomains.gff.gz
gff file of regulator domains defined a la GREAT
genomic_context_bed=genomic_context.bed.gz
bed-formatted file with genomic context
genomic_function_bed=genomic_function.bed.gz
bed-formatted file with functional annotations
genomic_function_tsv=genomic_function.tsv.gz
tsv-formatted file mapping terms to descriptions
Database design
---------------
Tables in the database usually represent genomic features such as
transcripts, genes or chromosomes. These are identified by the
following columns:
+--------------------+-----------------------------------------+
|*Column* |*Content* |
+--------------------+-----------------------------------------+
|transcript_id |ENSEMBL transcript identifier |
+--------------------+-----------------------------------------+
|gene_id |ENSEMBL gene id |
+--------------------+-----------------------------------------+
|contig |Chromosome name |
+--------------------+-----------------------------------------+
Example
=======
**Supply example data**
====
Code
====
"""
import sys
import re
import os
import sqlite3
import glob
import pandas as pd
from ruffus import follows, transform, merge, mkdir, files, jobs_limit,\
suffix, regex, add_inputs, originate
import CGAT.IndexedFasta as IndexedFasta
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGATPipelines.Pipeline as P
import CGATPipelines.PipelineGtfsubset as PipelineGtfsubset
import CGATPipelines.PipelineUCSC as PipelineUCSC
import CGATPipelines.PipelineGeneset as PipelineGeneset
import CGATPipelines.PipelineGO as PipelineGO
###################################################
# Pipeline configuration
###################################################
# Load the pipeline configuration. Later entries in the list take
# precedence, so a pipeline.ini in the working directory overrides the one
# shipped next to this script.
PARAMS = P.getParameters(
    ["%s/pipeline.ini" % os.path.splitext(__file__)[0],
     "../pipeline.ini",
     "pipeline.ini"])
# Add automatically created files to the interface. This is required
# when the pipeline is peek'ed. The statement below will
# add the following to the dictionary:
#
# "geneset.dir/lincrna_gene_tss.bed.gz" maps to
# "interface_geneset_lincrna_gene_tss_bed"
PARAMS.update(dict([
    ("interface_geneset_%s" %
     re.sub("[.]", "_", os.path.basename(P.snip(x, ".gz"))), x)
    for x in glob.glob('geneset.dir/*.bed.gz')]))
def connect():
    '''Open and return a connection to the pipeline's sqlite database.'''
    return sqlite3.connect(PARAMS["database_name"])
def connectToUCSC():
    '''Return a connection to the UCSC database named in the configuration.'''
    ucsc_settings = {
        "host": PARAMS["ucsc_host"],
        "user": PARAMS["ucsc_user"],
        "database": PARAMS["ucsc_database"],
    }
    return PipelineGtfsubset.connectToUCSC(**ucsc_settings)
############################################################
# Assembly
############################################################
@follows(mkdir('assembly.dir'))
@files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"),
       PARAMS['interface_contigs'])
def buildContigSizes(infile, outfile):
    '''Write a two-column (contig, size) table for the indexed genome.

    Parameters
    ----------
    infile : str
        Path to the indexed `genome` :term:`fasta` file (from `PARAMS`).
    Returns
    -------
    outfile : str
        Tab-separated file with contig name and contig size (nucleotides),
        named by `PARAMS: interface_contigs`.
    '''
    fasta = IndexedFasta.IndexedFasta(P.snip(infile, ".fasta"))
    size_map = fasta.getContigSizes(with_synonyms=False)
    df_contig = pd.DataFrame(
        [[name, length] for name, length in size_map.items()],
        columns=['contigs', 'size'])
    # Sort by contig name for a stable, reproducible output order.
    df_contig.sort_values('contigs', inplace=True)
    df_contig.to_csv(outfile, sep="\t", header=False, index=False)
@follows(buildContigSizes)
@follows(mkdir('assembly.dir'))
@files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"),
       PARAMS['interface_contigs_bed'])
def buildContigBed(infile, outfile):
    '''Write a :term:`BED` file with one full-length interval per contig.

    Parameters
    ----------
    infile : str
        Path to the indexed `genome` :term:`fasta` file (from `PARAMS`).
    Returns
    -------
    outfile : str
        :term:`BED` file with contig name, start (0) and contig size in
        nucleotides, named by `PARAMS: interface_contigs_bed`.
    '''
    fasta = IndexedFasta.IndexedFasta(P.snip(infile, ".fasta"))
    bed_out = IOTools.openFile(outfile, "w")
    for name, length in fasta.getContigSizes(with_synonyms=False).items():
        bed_out.write("%s\t%i\t%i\n" % (name, 0, length))
    bed_out.close()
@follows(buildContigBed)
@files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"),
       (PARAMS['interface_contigs_ungapped_bed'],
        PARAMS['interface_gaps_bed'],
        ))
def buildUngappedContigBed(infile, outfiles):
    '''
    Constructs :term:`BED` format files containing both gapped and ungapped
    contig sizes from an index genome :term:`fasta` file.
    Parameters
    ----------
    infile: str
        infile is constructed from `PARAMS` variable to retrieve
        the `genome` :term:`fasta` file
    assembly_gaps_min_size: int
        `PARAMS` - the minimum size (in nucleotides) for an assembly gap
    Returns
    -------
    outfiles: list
        two separate :term:`BED` format output files containing the contig sizes
        for contigs with and without gaps. The names are defined
        in the `PARAMS` `interface_contigs_ungapped_bed` and
        `interface_gaps_bed` parameters.
    '''
    prefix = P.snip(infile, ".fasta")
    fasta = IndexedFasta.IndexedFasta(prefix)
    outs_nogap = IOTools.openFile(outfiles[0], "w")
    outs_gap = IOTools.openFile(outfiles[1], "w")
    min_gap_size = PARAMS["assembly_gaps_min_size"]
    for contig, size in fasta.getContigSizes(with_synonyms=False).items():
        seq = fasta.getSequence(contig)
        def gapped_regions(seq):
            # Scan the sequence once, yielding half-open (start, end)
            # intervals for each run of "N" characters (assembly gaps).
            is_gap = seq[0] == "N"
            last = 0
            for x, c in enumerate(seq):
                if c == "N":
                    if not is_gap:
                        last = x
                        is_gap = True
                else:
                    if is_gap:
                        yield(last, x)
                        last = x
                        is_gap = False
            if is_gap:
                # A trailing gap extends to the contig end; `size` is the
                # contig length from the enclosing loop.
                yield last, size
        last_end = 0
        for start, end in gapped_regions(seq):
            # Ignore gaps shorter than the configured minimum size.
            if end - start < min_gap_size:
                continue
            # NOTE(review): when the first qualifying gap does not start at
            # position 0, the leading ungapped segment [0, start) is never
            # written because last_end is still 0 -- confirm this is the
            # intended behaviour.
            if last_end != 0:
                outs_nogap.write("%s\t%i\t%i\n" % (contig, last_end, start))
            outs_gap.write("%s\t%i\t%i\n" % (contig, start, end))
            last_end = end
        # Emit the ungapped tail after the final gap (or the whole contig
        # when no qualifying gap was found).
        if last_end < size:
            outs_nogap.write("%s\t%i\t%i\n" % (contig, last_end, size))
    outs_nogap.close()
    outs_gap.close()
@follows(buildUngappedContigBed)
@files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"),
       PARAMS['interface_cpg_bed'])
def buildCpGBed(infile, outfile):
    '''
    Output a :term:`BED` file that contains the location of all CpGs
    in the input genome using `CGAT` script `fasta2bed`.
    Parameters
    ----------
    infile: str
        infile is constructed from `PARAMS` variable to retrieve
        the `genome` :term:`fasta` file
    Returns
    -------
    outfile: str
        A :term:`BED` format file containing location of CpGs across the
        genome. The BED file is then indexed using tabix
    '''
    # P.run() interpolates %(name)s placeholders from this function's local
    # variables, so job_memory/infile/outfile are consumed implicitly even
    # though they look unused.
    job_memory = PARAMS["job_highmemory"]
    statement = '''
    cgat fasta2bed
    --method=cpg
    --log=%(outfile)s.log
    < %(infile)s
    | bgzip
    > %(outfile)s
    '''
    P.run()
    # Index the compressed BED file so it can be queried by position.
    statement = '''
    tabix -p bed %(outfile)s
    '''
    P.run()
###################################################################
# ENSEMBL gene set
###################################################################
@follows(mkdir('ensembl.dir'))
@transform(PARAMS["ensembl_filename_gtf"],
           regex("(\S+)"),
           r"%s" % PARAMS['interface_geneset_all_gtf'])
def buildUCSCGeneSet(infile, outfile):
    '''output sanitized ENSEMBL geneset.
    This method outputs an ENSEMBL gene set after some sanitizing steps:
    1. Chromosome names are changed to the UCSC convention.
    2. Chromosomes that match the regular expression specified in
    the configuration file are removed.
    Arguments
    ---------
    infiles : string
        ENSEMBL geneset in :term:`gtf` format.
        NCBI Assembly report in 'txt' format.
    outfile : string
        geneset in :term:`gtf` format.
    '''
    # The shell command is assembled as a list of fragments, joined below.
    # P.run() interpolates %(name)s placeholders from this function's locals
    # and from PARAMS (e.g. ncbi_assembly_report, ncbi_remove_contigs).
    job_memory = PARAMS["job_memory"]
    statement = ['''zcat %(infile)s
    | grep 'transcript_id'
    | cgat gff2gff
    --method=sanitize
    --sanitize-method=ucsc
    --assembly-report="%(ncbi_assembly_report)s"
    --log=%(outfile)s.log
    ''']
    if PARAMS["ncbi_remove_contigs"]:
        # in quotation marks to avoid confusion with shell special
        # characters such as ( and |
        statement.append(
            ''' --contig-pattern="%(ncbi_remove_contigs)s" ''')
    statement.append(
        '''
        | cgat gtf2gtf
        --method=set-gene_biotype-to-source
        --log=%(outfile)s.log
        | gzip > %(outfile)s ''')
    statement = " ".join(statement)
    P.run()
@transform(buildUCSCGeneSet,
           suffix("ensembl.dir/geneset_all.gtf.gz"),
           PARAMS['interface_geneset_cds_gtf'])
def buildCdsTranscript(infile, outfile):
    '''Write only the ``CDS`` features of the ENSEMBL gene set.

    No gene_biotype filter is applied because some CDS entries are
    classified as polymorphic_pseudogene.

    Arguments
    ---------
    infile : from ruffus
        ENSEMBL geneset, filename named in pipeline.ini
    outfile : from ruffus
        Output filename named in pipeline.ini
    '''
    subsetter = PipelineGtfsubset.SubsetGTF(infile)
    # Filter on the feature column configured in pipeline.ini.
    subsetter.filterGTF(outfile, PARAMS['ensembl_cgat_feature'], ["CDS"],
                        operators=None)
@transform(buildUCSCGeneSet,
           suffix("ensembl.dir/geneset_all.gtf.gz"),
           PARAMS['interface_geneset_exons_gtf'])
def buildExonTranscript(infile, outfile):
    '''Write only the ``exon`` features of the ENSEMBL gene set.

    The output contains both coding and non-coding transcripts.

    Arguments
    ---------
    infile : from ruffus
        ENSEMBL geneset, filename named in pipeline.ini
    outfile : from ruffus
        Output filename named in pipeline.ini
    '''
    subsetter = PipelineGtfsubset.SubsetGTF(infile)
    # Filter on the feature column configured in pipeline.ini.
    subsetter.filterGTF(outfile, PARAMS['ensembl_cgat_feature'], ["exon"],
                        operators=None)
@transform(buildUCSCGeneSet,
           suffix("ensembl.dir/geneset_all.gtf.gz"),
           PARAMS['interface_geneset_coding_exons_gtf'])
def buildCodingExonTranscript(infile, outfile):
    '''Write the ``exon`` features of protein-coding transcripts only.

    Keeps rows whose feature is ``exon`` AND whose gene_biotype is
    ``protein_coding``.

    Arguments
    ---------
    infile : from ruffus
        ENSEMBL geneset, filename named in pipeline.ini
    outfile : from ruffus
        Output filename named in pipeline.ini
    '''
    subsetter = PipelineGtfsubset.SubsetGTF(infile)
    columns = [PARAMS['ensembl_cgat_feature'],
               PARAMS['ensembl_cgat_gene_biotype']]
    subsetter.filterGTF(outfile, columns, ["exon", "protein_coding"],
                        operators="and")
@transform(buildUCSCGeneSet,
           suffix("ensembl.dir/geneset_all.gtf.gz"),
           PARAMS['interface_geneset_lincrna_exons_gtf'])
def buildLincRNAExonTranscript(infile, outfile):
    '''Write the ``exon`` features of lincRNA transcripts only.

    Keeps rows whose feature is ``exon`` AND whose gene_biotype is
    ``lincRNA``.

    Arguments
    ---------
    infile : from ruffus
        ENSEMBL geneset, filename named in pipeline.ini
    outfile : from ruffus
        Output filename named in pipeline.ini
    '''
    subsetter = PipelineGtfsubset.SubsetGTF(infile)
    columns = [PARAMS['ensembl_cgat_feature'],
               PARAMS['ensembl_cgat_gene_biotype']]
    subsetter.filterGTF(outfile, columns, ["exon", "lincRNA"],
                        operators="and")
@transform(buildUCSCGeneSet,
           suffix("ensembl.dir/geneset_all.gtf.gz"),
           PARAMS['interface_geneset_noncoding_exons_gtf'])
def buildNonCodingExonTranscript(infile, outfile):
    '''Write the ``exon`` features of non-coding transcripts.

    Keeps rows whose feature is ``exon`` and whose gene_biotype is NOT
    ``protein_coding`` (the "and not" operator inverts the biotype match).

    Arguments
    ---------
    infile : from ruffus
        ENSEMBL geneset, filename named in pipeline.ini
    outfile : from ruffus
        Output filename named in pipeline.ini
    '''
    subsetter = PipelineGtfsubset.SubsetGTF(infile)
    columns = [PARAMS['ensembl_cgat_feature'],
               PARAMS['ensembl_cgat_gene_biotype']]
    subsetter.filterGTF(outfile, columns, ["exon", "protein_coding"],
                        operators="and not")
@transform((buildUCSCGeneSet,
            buildCdsTranscript,
            buildExonTranscript,
            buildCodingExonTranscript,
            buildNonCodingExonTranscript,
            buildLincRNAExonTranscript),
           suffix(".gtf.gz"), "_gtf.load")
def loadTranscripts(infile, outfile):
    '''load transcripts from a GTF file into the database.
    The table will be indexed on ``gene_id`` and ``transcript_id``
    Arguments
    ---------
    infile : string
        ENSEMBL geneset in :term:`gtf` format.
    outfile : string
        Logfile. The table name is derived from `outfile`.
    '''
    # P.run() interpolates %(name)s placeholders from this function's local
    # variables, so job_memory and load_statement are consumed implicitly.
    job_memory = PARAMS["job_highmemory"]
    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--add-index=gene_id "
        "--add-index=transcript_id "
        "--allow-empty-file ")
    statement = '''
    gunzip < %(infile)s
    | cgat gtf2tsv -f
    | %(load_statement)s
    > %(outfile)s'''
    P.run()
@P.add_doc(PipelineGtfsubset.buildFlatGeneSet)
@transform(buildUCSCGeneSet,
           suffix("ensembl.dir/geneset_all.gtf.gz"),
           PARAMS['interface_geneset_flat_gtf'])
def buildFlatGeneSet(infile, outfile):
    # Thin wrapper: documentation is inherited from
    # PipelineGtfsubset.buildFlatGeneSet via the P.add_doc decorator.
    PipelineGtfsubset.buildFlatGeneSet(infile, outfile,
                                       job_memory=PARAMS["job_highmemory"])
####################################################################
# Geneset derived annotations
####################################################################
@follows(mkdir("geneset.dir"))
@transform(buildUCSCGeneSet,
           suffix("ensembl.dir/geneset_all.gtf.gz"),
           PARAMS["interface_ref_flat"])
def buildRefFlat(infile, outfile):
    '''build flat geneset for Picard RnaSeqMetrics.
    '''
    # gtfToGenePred emits an extended genePred; the paste/cut step rearranges
    # it into refFlat layout (gene name first, then the first ten genePred
    # columns). P.run() interpolates tmpflat/infile/outfile from locals.
    tmpflat = P.getTempFilename(".")
    job_memory = PARAMS["job_memory"]
    statement = '''
    gtfToGenePred -genePredExt -geneNameAsName2 %(infile)s %(tmpflat)s;
    paste <(cut -f 12 %(tmpflat)s) <(cut -f 1-10 %(tmpflat)s)
    > %(outfile)s
    '''
    P.run()
    os.unlink(tmpflat)
@transform((buildCodingExonTranscript,
            buildNonCodingExonTranscript,
            buildLincRNAExonTranscript),
           regex('.*geneset_(.*)_exons.gtf.gz'),
           r'geneset.dir/\1_transcript_region.bed.gz')
def buildTranscriptRegions(infile, outfile):
    """build a :term:`bed` file of regions spanning whole transcripts.

    Exons of each transcript are joined (gtf2gtf --method=join-exons) and
    the result is converted to :term:`bed`, naming each interval by its
    `transcript_id`.

    NOTE(review): the previous docstring described exporting a table of
    selenocysteine-containing transcripts, which does not match this code.

    Arguments
    ---------
    infile : string
        Input filename with geneset in :term:`gtf` format.
    outfile : string
        Output filename with genomic regions in :term:`bed` format.
    """
    # P.run() interpolates %(name)s placeholders from this function's locals.
    job_memory = PARAMS["job_memory"]
    statement = """
    gunzip < %(infile)s
    | cgat gtf2gtf --method=join-exons
    --log=%(outfile)s.log
    | cgat gff2bed --is-gtf
    --set-name=transcript_id
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s """
    P.run()
@transform((buildCodingExonTranscript,
            buildNonCodingExonTranscript,
            buildLincRNAExonTranscript),
           regex('.*geneset_(.*)_exons.gtf.gz'),
           r'geneset.dir/\1_gene_region.bed.gz')
def buildGeneRegions(infile, outfile):
    """build a :term:`bed` file of regions spanning whole gene models.
    This method outputs a single interval spanning the genomic region
    that covers all transcripts within a particular gene.
    The name column of the :term:`bed` file is set to the `gene_id`.
    Arguments
    ---------
    infile : string
        Input filename with geneset in :term:`gtf` format.
    outfile : string
        Output filename with genomic regions in :term:`bed` format.
    """
    # P.run() interpolates %(name)s placeholders from this function's locals.
    job_memory = PARAMS["job_memory"]
    statement = """
    gunzip < %(infile)s
    | cgat gtf2gtf
    --method=merge-transcripts
    --log=%(outfile)s.log
    | cgat gff2bed --is-gtf --set-name=gene_id
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s """
    P.run()
@follows(mkdir("geneset.dir"))
@transform((buildCodingExonTranscript,
            buildNonCodingExonTranscript,
            buildLincRNAExonTranscript),
           regex('.*geneset_(.*)_exons.gtf.gz'),
           r'geneset.dir/\1_transcript_tss.bed.gz')
def buildTranscriptTSS(infile, outfile):
    """build a :term:`bed` file with transcription start sites.
    This method outputs all transcription start sites within a
    geneset. The trancription start site is derived from the most
    upstream coordinate of each transcript.
    The name column of the :term:`bed` file is set to the
    `transcript_id`.
    Arguments
    ---------
    infile : list
        Input filename with geneset in :term:`gtf` format.
    outfile : string
        Output filename with genomic regions in :term:`bed` format.
    """
    # The --promotor-size=1 trick reduces each transcript's promotor to a
    # single base: its TSS. P.run() interpolates %(name)s placeholders from
    # this function's locals and from PARAMS (genome_dir, genome).
    job_memory = PARAMS["job_memory"]
    statement = """
    gunzip < %(infile)s
    | cgat gtf2gtf --method=join-exons
    --log=%(outfile)s.log
    | cgat gtf2gff --method=promotors
    --promotor-size=1
    --genome-file=%(genome_dir)s/%(genome)s --log=%(outfile)s.log
    | cgat gff2bed --is-gtf --set-name=transcript_id
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s """
    P.run()
@transform((buildCodingExonTranscript,
            buildNonCodingExonTranscript,
            buildLincRNAExonTranscript),
           regex('.*geneset_(.*)_exons.gtf.gz'),
           r'geneset.dir/\1_gene_tssinterval.bed.gz')
def buildGeneTSSInterval(infile, outfile):
    """build a :term:`bed` file with intervals that cover all transcription
    start sites within a gene.

    This method outputs for each gene the smallest genomic region that covers
    all the transcription start sites within that gene.

    The name column of the :term:`bed` file is set to the
    `gene_id`.

    Arguments
    ---------
    infile : string
       Input filename with geneset in :term:`gtf` format.
    outfile : string
       Output filename with genomic regions in :term:`bed` format.
    """
    # `job_memory` and `statement` are read from this frame by P.run()
    job_memory = PARAMS["job_memory"]

    # The sed calls relabel the 1-bp promotor features so that
    # merge-transcripts afterwards collapses all TSS of a gene into
    # one spanning interval.
    statement = """
    gunzip < %(infile)s
    | cgat gtf2gtf
    --method=join-exons
    --log=%(outfile)s.log
    | cgat gtf2gff
    --method=promotors
    --promotor-size=1
    --genome-file=%(genome_dir)s/%(genome)s
    --log=%(outfile)s.log
    | sed s/transcript/exon/g
    | sed s/exon_id/transcript_id/g
    | cgat gtf2gtf
    --method=merge-transcripts
    --log=%(outfile)s.log
    | cgat gff2bed
    --is-gtf
    --set-name=transcript_id
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s """
    P.run()
@transform((buildCodingExonTranscript,
            buildNonCodingExonTranscript,
            buildLincRNAExonTranscript),
           regex('.*geneset_(.*)_exons.gtf.gz'),
           r'geneset.dir/\1_transcript_tts.bed.gz')
def buildTranscriptTTS(infile, outfile):
    """build a :term:`bed` file with transcription termination sites.

    This method outputs all transcription termination sites within a
    geneset. The transcription termination site is derived from the most
    downstream coordinate of each transcript.

    The name column of the :term:`bed` file is set to the
    `transcript_id`.

    Arguments
    ---------
    infile : string
       Input filename with geneset in :term:`gtf` format.
    outfile : string
       Output filename with genomic regions in :term:`bed` format.
    """
    # `job_memory` and `statement` are read from this frame by P.run()
    job_memory = PARAMS["job_memory"]

    statement = """
    gunzip < %(infile)s
    | cgat gtf2gtf --method=join-exons
    --log=%(outfile)s.log
    | cgat gtf2gff --method=tts
    --promotor-size=1
    --genome-file=%(genome_dir)s/%(genome)s --log=%(outfile)s.log
    | cgat gff2bed --is-gtf --set-name=transcript_id
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s """
    P.run()
@follows(mkdir("geneset.dir"))
@transform((buildCodingExonTranscript,
            buildNonCodingExonTranscript,
            buildLincRNAExonTranscript),
           regex('.*geneset_(.*)_exons.gtf.gz'),
           r'geneset.dir/\1_gene_tss.bed.gz')
def buildGeneTSS(infile, outfile):
    """build a :term:`bed` file with transcription start sites per gene.

    This method outputs a single transcription start site for each
    gene within a geneset. The transcription start site is derived from
    the most upstream coordinate of each gene.

    The name column of the :term:`bed` file is set to the
    `gene_id`.

    Arguments
    ---------
    infile : string
       Input filename with geneset in :term:`gtf` format.
    outfile : string
       Output filename with genomic regions in :term:`bed` format.
    """
    # `job_memory` and `statement` are read from this frame by P.run()
    job_memory = PARAMS["job_memory"]

    statement = """gunzip < %(infile)s
    | cgat gtf2gtf
    --method=merge-transcripts
    --log=%(outfile)s.log
    | cgat gtf2gff --method=promotors --promotor-size=1
    --genome-file=%(genome_dir)s/%(genome)s --log=%(outfile)s.log
    | cgat gff2bed --is-gtf --set-name=gene_id
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s"""
    P.run()
@transform((buildCodingExonTranscript,
            buildNonCodingExonTranscript,
            buildLincRNAExonTranscript),
           regex('.*geneset_(.*)_exons.gtf.gz'),
           r'geneset.dir/\1_gene_tts.bed.gz')
def buildGeneTTS(infile, outfile):
    """build a :term:`bed` file with transcription termination sites per gene.

    This method outputs a single transcription termination site for each
    gene within a geneset. The transcription termination site is derived from
    the most downstream coordinate of each gene.

    The name column of the :term:`bed` file is set to the
    `gene_id`.

    Arguments
    ---------
    infile : string
       Input filename with geneset in :term:`gtf` format.
    outfile : string
       Output filename with genomic regions in :term:`bed` format.
    """
    # `job_memory` and `statement` are read from this frame by P.run()
    job_memory = PARAMS["job_memory"]

    statement = """gunzip < %(infile)s
    | cgat gtf2gtf
    --method=merge-transcripts
    --log=%(outfile)s.log
    | cgat gtf2gff --method=tts --promotor-size=1
    --genome-file=%(genome_dir)s/%(genome)s --log=%(outfile)s.log
    | cgat gff2bed --is-gtf --set-name=gene_id
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s"""
    P.run()
@transform(buildGeneRegions,
           regex('(.*)_.*.bed.gz'),
           add_inputs(buildContigSizes),
           r'\1_intergenic.bed.gz')
def buildIntergenicRegions(infiles, outfile):
    """build a :term:`bed` file with regions not overlapping any genes.

    Gene regions are sorted and complemented against the contig sizes
    using bedtools' ``complementBed``.

    Arguments
    ---------
    infiles : list
       - Input filename with gene regions in :term:`bed` format.
       - Input filename with chromosome sizes in :term:`tsv` format.
    outfile : string
       Output filename with genomic regions in :term:`bed` format.
    """
    infile, contigs = infiles

    # `job_memory` and `statement` are read from this frame by P.run()
    job_memory = PARAMS["job_memory"]

    statement = '''zcat %(infile)s
    | sort -k1,1 -k2,2n
    | complementBed -i stdin -g %(contigs)s
    | gzip
    > %(outfile)s'''
    P.run()
@P.add_doc(PipelineGtfsubset.loadGeneInformation)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@follows(mkdir('ensembl.dir'))
@transform(PARAMS["ensembl_filename_gtf"],
           suffix(PARAMS["ensembl_filename_gtf"]),
           "ensembl.dir/gene_info.load")
def loadGeneInformation(infile, outfile):
    '''load gene-level annotation into the database (``gene_info`` table).'''
    # Full documentation is attached from the delegate via @P.add_doc above.
    PipelineGtfsubset.loadGeneInformation(infile, outfile,
                                          job_memory=PARAMS["job_highmemory"])
@follows(loadGeneInformation)
@originate("protein_coding_gene_ids.tsv")
def identifyProteinCodingGenes(outfile):
    '''Output a list of protein coding gene identifiers.

    Identify protein coding genes from the ``gene_info`` annotation
    database table and output the gene identifiers, one per line,
    under a ``gene_id`` header.

    Parameters
    ----------
    outfile : str
       Output file of :term:`tsv` format
    annotations_interface_table_gene_info : str
       :term:`PARAMS`. Database table name for gene information
    '''
    dbh = connect()

    # The query contains no %-placeholders, so the former ``% locals()``
    # was a no-op — and would have raised if a literal ``%`` were ever
    # added to the SQL. It has been removed.
    select = dbh.execute("""SELECT DISTINCT gene_id
                            FROM gene_info
                            WHERE gene_biotype = 'protein_coding'""")

    with IOTools.openFile(outfile, "w") as outf:
        outf.write("gene_id\n")
        outf.write("\n".join((x[0] for x in select)) + "\n")
@transform(buildUCSCGeneSet,
           regex(".*"),
           PARAMS['interface_utr_all_gtf'])
def buildUtrGeneSet(infile, outfile):
    """extract UTR entries from the UCSC gene set.

    Keeps every line of the input :term:`gtf` that contains the text
    ``utr`` and writes the result gzip-compressed to `outfile`.

    NOTE(review): ``grep 'utr'`` matches anywhere on the line, not only
    in the feature column — confirm this cannot match attribute text.
    """
    # `job_memory` and `statement` are read from this frame by P.run()
    job_memory = PARAMS["job_memory"]

    statement = "zcat %(infile)s | grep 'utr' | gzip > %(outfile)s"

    P.run()
@transform(buildFlatGeneSet,
           regex(".*"),
           add_inputs(identifyProteinCodingGenes,
                      buildExonTranscript),
           PARAMS['interface_geneset_intron_gtf'])
def buildIntronGeneModels(infiles, outfile):
    '''build protein-coding intron-transcripts.

    Retain the protein coding genes from the input gene set and
    convert the exonic sequences to intronic sequences. 10 bp is
    truncated on either end of an intron, and introns need to have a
    minimum length of 100. Introns from nested genes might overlap,
    but all exons are removed.

    Parameters
    ----------
    infiles : list
       infiles[0] : str
          Input filename with the flattened gene set in :term:`gtf` format
       infiles[1] : str
          Filename of :term:`tsv` list of protein-coding gene identifiers
       infiles[2] : str
          Filename of :term:`gtf` exon models used to crop the introns
    outfile: str
       Output filename in :term:`gtf` format
    '''
    infile, genes_tsv, filename_exons = infiles

    # `job_memory` and `statement` are read from this frame by P.run()
    job_memory = PARAMS["job_memory"]

    statement = '''
    zcat %(infile)s
    | cgat gtf2gtf
    --method=filter
    --map-tsv-file=%(genes_tsv)s
    --log=%(outfile)s.log
    | cgat gtf2gtf
    --method=sort
    --sort-order=gene
    | cgat gtf2gtf
    --method=exons2introns
    --intron-min-length=100
    --intron-border=10
    --log=%(outfile)s.log
    | cgat gff2gff
    --method=crop
    --crop-gff-file=%(filename_exons)s
    --log=%(outfile)s.log
    | cgat gtf2gtf
    --method=set-transcript-to-gene
    --log=%(outfile)s.log
    | awk -v OFS="\\t" -v FS="\\t" '{$3="intron"; print}'
    | gzip
    > %(outfile)s
    '''
    P.run()
# Next need to add identifyProteinCodingGenes, buildIntronGeneModels
# aim is to generate the intron gtf here for use in bamstats
################################################################
# UCSC derived annotations
################################################################
@follows(mkdir('ucsc.dir'))
@originate(PARAMS["interface_rna_gff"])
def importRNAAnnotationFromUCSC(outfile):
    """download UCSC repetitive RNA types into a :term:`gff` file.

    The repeat classes to fetch come from the ``ucsc_rnatypes``
    configuration option; contigs matching ``ncbi_remove_contigs``
    are excluded.
    """
    PipelineGtfsubset.getRepeatDataFromUCSC(
        dbhandle=connectToUCSC(),
        repclasses=P.asList(PARAMS["ucsc_rnatypes"]),
        outfile=outfile,
        remove_contigs_regex=PARAMS["ncbi_remove_contigs"],
        job_memory=PARAMS["job_memory"])
@follows(mkdir('ucsc.dir'))
@originate(PARAMS["interface_repeats_gff"])
def importRepeatsFromUCSC(outfile):
    """download UCSC repeat types into a :term:`gff` file.

    The repeat classes to fetch are taken from the ``ucsc_repeattypes``
    configuration option.
    """
    PipelineGtfsubset.getRepeatDataFromUCSC(
        dbhandle=connectToUCSC(),
        repclasses=P.asList(PARAMS["ucsc_repeattypes"]),
        outfile=outfile,
        job_memory=PARAMS["job_memory"])
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform((importRepeatsFromUCSC,
            importRNAAnnotationFromUCSC),
           suffix(".gff.gz"), "_gff.load")
def loadRepeats(infile, outfile):
    """load genomic locations of repeats into database.

    This method loads the genomic coordinates (contig, start, end)
    and the repeat name into the database.

    Arguments
    ---------
    infile : string
        Input filename in :term:`gff` with repeat annotations.
    outfile : string
        Output filename with logging information. The table name is
        derived from outfile.
    """
    # `job_memory`, `statement` and `load_statement` are read from this
    # frame by P.run()
    job_memory = PARAMS["job_memory"]

    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--add-index=class "
        "--header-names=contig,start,stop,class")

    statement = """zcat %(infile)s
    | cgat gff2bed --set-name=class
    | grep -v "#"
    | cut -f1,2,3,4
    | %(load_statement)s
    > %(outfile)s"""
    P.run()
#######################################################
# miRBase annotations
########################################################
@follows(mkdir("mirbase.dir"))
@transform(PARAMS['mirbase_filename_mir_gff'],
           suffix(PARAMS['mirbase_filename_mir_gff']),
           PARAMS['interface_geneset_primary_mir_gff'])
def buildmiRPrimaryTranscript(infile, outfile):
    '''
    Subset a miRBase annotation GFF3 file. The GFF3 file can be
    downloaded from miRBase; make sure the annotation matches the
    genome build that you are using.

    This function subsets the GFF3 file by selecting annotations that
    are labelled "miRNA_primary_transcript".
    '''
    m = PipelineGtfsubset.SubsetGFF3(infile)

    # the attribute/feature key to filter on comes from the configuration
    filteroption = PARAMS['ensembl_cgat_feature']
    filteritem = ["miRNA_primary_transcript"]

    m.filterGFF3(outfile, filteroption, filteritem)
@follows(buildmiRPrimaryTranscript)
@transform(PARAMS['mirbase_filename_mir_gff'],
           suffix(PARAMS['mirbase_filename_mir_gff']),
           PARAMS['interface_geneset_mir_gff'])
def buildmiRNonPrimaryTranscript(infile, outfile):
    '''
    Subset a miRBase annotation GFF3 file. The GFF3 file can be
    downloaded from miRBase; make sure the annotation matches the
    genome build that you are using.

    This function subsets the GFF3 file by selecting annotations that
    are labelled "miRNA", i.e. all of the non-primary transcripts.
    '''
    m = PipelineGtfsubset.SubsetGFF3(infile)

    # the attribute/feature key to filter on comes from the configuration
    filteroption = PARAMS['ensembl_cgat_feature']
    filteritem = ["miRNA"]

    m.filterGFF3(outfile, filteroption, filteritem)
@transform((buildmiRPrimaryTranscript,
            buildmiRNonPrimaryTranscript),
           suffix(".gff3.gz"), "_gff3.load")
def loadmiRNATranscripts(infile, outfile):
    '''load transcripts from a GFF3 file into the database.

    Only the feature and Name columns (fields 3 and 12 of the
    tab-converted output) are loaded.

    Arguments
    ---------
    infile : string
        miRBase annotations in :term:`gff3` format.
    outfile : string
        Logfile. The table name is derived from `outfile`.
    '''
    # `job_memory`, `statement` and `load_statement` are read from this
    # frame by P.run()
    job_memory = PARAMS["job_memory"]

    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--allow-empty-file "
        "--header-names=feature,Name")

    statement = '''
    export LANG=en_GB.UTF-8 && zcat %(infile)s
    | cgat gtf2tsv --is-gff3 --attributes-as-columns 2> /dev/null
    | grep -v "#"
    | cut -f3,12
    |%(load_statement)s
    > %(outfile)s'''
    P.run()
###############################################################
# Ontologies
###############################################################
@P.add_doc(PipelineGO.createGOFromENSEMBL)
@follows(mkdir('ontologies.dir'))
@files([(None, PARAMS["interface_go_ensembl"]), ])
def createGO(infile, outfile):
    '''
    Downloads GO annotations from ENSEMBL.

    Uses the go_host, go_database and go_port parameters from the ini
    file and runs the runGO.py "filename-dump" option, which calls
    DumpGOFromDatabase from GO.py.
    '''
    PipelineGO.createGOFromENSEMBL(infile, outfile,
                                   job_memory=PARAMS["job_highmemory"])
@P.add_doc(PipelineGO.createGOSlimFromENSEMBL)
@transform(createGO,
           regex("(.*)"),
           PARAMS["interface_goslim_ensembl"])
def createGOSlim(infile, outfile):
    '''
    Downloads GO slim annotations from ENSEMBL.
    '''
    # log the download URL for traceability
    E.warn(PARAMS['go_url_goslim'])
    PipelineGO.createGOSlimFromENSEMBL(infile, outfile,
                                       job_memory=PARAMS["job_highmemory"])
@P.add_doc(P.load)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform((createGO, createGOSlim),
           suffix(".tsv.gz"),
           r"\1_assignments.load")
def loadGOAssignments(infile, outfile):
    '''
    Load GO assignments into the database, indexed on gene_id and go_id.'''
    P.load(infile, outfile,
           options="--add-index=gene_id --add-index=go_id")
################################################################
# Enrichment analysis
#################################################################
@P.add_doc(PipelineGeneset.annotateGenome)
@follows(mkdir('enrichment.dir'))
@files(buildUCSCGeneSet, PARAMS['interface_annotation_gff'])
def annotateGenome(infile, outfile):
    """annotate the genome with genomic features.

    This task only considers protein coding genes, as
    processed_transcripts tend to cover larger genomic regions and
    often overlap between adjacent protein coding genes.
    """
    PipelineGeneset.annotateGenome(infile,
                                   outfile,
                                   only_proteincoding=True,
                                   job_memory=PARAMS["job_memory"])
@P.add_doc(PipelineGeneset.annotateGeneStructure)
@follows(mkdir('enrichment.dir'))
@files(buildUCSCGeneSet, PARAMS['interface_genestructure_gff'])
def annotateGeneStructure(infile, outfile):
    """annotate the genome with gene structure.

    This task only considers protein coding genes, as
    processed_transcripts tend to cover larger genomic regions and
    often overlap between adjacent protein coding genes.
    """
    PipelineGeneset.annotateGeneStructure(infile,
                                          outfile,
                                          only_proteincoding=True,
                                          job_memory=PARAMS["job_memory"])
@follows(mkdir('enrichment.dir'))
@merge(buildFlatGeneSet, PARAMS["interface_territories_gff"])
def buildGeneTerritories(infile, outfile):
    """build gene territories from protein coding genes.

    The territory of a gene is defined as the region of the
    gene extended by a certain radius on either end. If the
    gene territories of two genes overlap, they are resolved
    at the mid-point between the two adjacent genes.

    Arguments
    ---------
    infile : string
       ENSEMBL geneset in :term:`gtf` format.
    outfile : string
       Output filename in :term:`gff` format.
    enrichment_territories_radius : int
       see :term:`PARAMS`
    """
    # `job_memory` and `statement` are read from this frame by P.run()
    job_memory = PARAMS["job_highmemory"]

    statement = '''
    zcat %(infile)s
    | cgat gtf2gtf
    --method=filter
    --filter-method=proteincoding
    --log=%(outfile)s.log
    | cgat gtf2gtf
    --method=sort --sort-order=gene
    | cgat gtf2gtf
    --method=merge-transcripts
    | cgat gtf2gtf
    --method=sort --sort-order=position
    | cgat gtf2gff
    --genome-file=%(genome_dir)s/%(genome)s
    --log=%(outfile)s.log
    --territory-extension=%(enrichment_territories_radius)s
    --method=territories
    | cgat gtf2gtf
    --method=filter
    --filter-method=longest-gene
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s '''
    P.run()
@P.add_doc(PipelineGeneset.buildGenomicFunctionalAnnotation)
@follows(mkdir('enrichment.dir'))
@merge((buildGeneTerritories, loadGOAssignments),
       (PARAMS["interface_genomic_function_bed"],
        PARAMS["interface_genomic_function_tsv"],
        ))
def buildGenomicFunctionalAnnotation(infiles, outfiles):
    """map gene territories to GO-based functional annotations.

    The GO assignments are read from the database; only the territories
    file (infiles[0]) is used directly.
    """
    territories_gtf_file = infiles[0]

    PipelineGeneset.buildGenomicFunctionalAnnotation(
        territories_gtf_file,
        dbh=connect(),
        outfiles=outfiles,
        job_memory=PARAMS["job_memory"])
@P.add_doc(PipelineGtfsubset.buildGenomicContext)
@follows(mkdir('enrichment.dir'))
@merge((importRepeatsFromUCSC,
        importRNAAnnotationFromUCSC,
        buildUCSCGeneSet,
        buildUtrGeneSet,
        buildIntronGeneModels),
       PARAMS["interface_genomic_context_bed"])
def buildGenomicContext(infiles, outfile):
    """merge repeat, RNA and gene-model annotations into one
    genomic-context :term:`bed` file (see delegate docstring)."""
    PipelineGtfsubset.buildGenomicContext(infiles, outfile,
                                          job_memory=PARAMS["job_highmemory"])
##############################################################
# Define final task
###############################################################
@follows(buildUCSCGeneSet,
         buildContigSizes,
         buildExonTranscript,
         buildCodingExonTranscript,
         buildFlatGeneSet,
         buildIntronGeneModels,
         buildGenomicContext,
         buildRefFlat,
         importRNAAnnotationFromUCSC)
def lite():
    '''
    build only tasks that are used by the common upstream pipelines:
    pipeline_readqc, mapping and bamstats. To run the downstream pipelines
    run the full task so all annotations are made.
    '''
    pass
@follows(buildGenomicContext,
         buildGenomicFunctionalAnnotation)
def enrichment():
    """convenience target: annotations for enrichment analysis"""
@follows(buildTranscriptRegions,
         buildTranscriptTSS,
         buildTranscriptTTS,
         buildGeneRegions,
         buildGeneTSS,
         buildGeneTTS,
         buildGeneTSSInterval,
         buildIntergenicRegions)
def geneset():
    """convenience target: geneset derived annotations"""
@follows(loadTranscripts,
         loadRepeats,
         loadmiRNATranscripts,
         loadGeneInformation,
         buildFlatGeneSet,
         buildRefFlat,
         buildUtrGeneSet,
         buildContigBed,
         buildGeneTerritories,
         geneset,
         enrichment)
def full():
    '''build all targets - a dummy task to run the pipeline to
    completion.'''
    pass
@follows(mkdir("Report.dir"))
def build_report():
    '''report dummy task - only ensures Report.dir exists'''
    pass
def main(argv=None):
    """Command-line entry point.

    Forwards *argv* (default: ``sys.argv``) to the pipeline runner and
    returns its result.
    """
    if argv is None:
        argv = sys.argv
    return P.main(argv)


if __name__ == "__main__":
    # Previously the guard called P.main(sys.argv) directly, leaving
    # main() as dead code; route through main() so there is a single
    # entry point.
    sys.exit(main())
| {
"content_hash": "543dd5117a11cdf6d1b4028d08902db0",
"timestamp": "",
"source": "github",
"line_count": 1624,
"max_line_length": 79,
"avg_line_length": 30.19704433497537,
"alnum_prop": 0.6408849918433932,
"repo_name": "CGATOxford/CGATPipelines",
"id": "0eef8a92d65190f9a5c93c1c0a6ac19eb1ee658c",
"size": "49040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CGATPipelines/pipeline_genesets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4360"
},
{
"name": "HTML",
"bytes": "40732"
},
{
"name": "JavaScript",
"bytes": "302029"
},
{
"name": "Jupyter Notebook",
"bytes": "4393775"
},
{
"name": "Makefile",
"bytes": "45084"
},
{
"name": "Python",
"bytes": "5357820"
},
{
"name": "R",
"bytes": "62312"
},
{
"name": "Shell",
"bytes": "67312"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import configuration
import numpy as np
from LSTM_model import LSTM_model
def main():
    # NOTE(review): despite its name, train() below only *evaluates* a
    # restored checkpoint; the config is built from the eval input file.
    config = configuration.ModelConfig(data_filename="input_seqs_eval")
    train(config)
def train(config):
    """Evaluate the LSTM model over one pass of the (unshuffled) input queue.

    Restores weights from ``./save/LSTM/save-60000`` and prints the
    classification accuracy over the whole epoch.
    """
    with tf.Graph().as_default():
        model = LSTM_model(config)
        inputs_seqs_batch, outputs_batch = model.reader.read(shuffle=False, num_epochs=1)

        init_op = tf.group(tf.initialize_all_variables(),
                           tf.initialize_local_variables())
        sess = tf.Session()
        sess.run(init_op)
        saver = tf.train.Saver(tf.all_variables())
        # NOTE(review): global_steps is never used in this function.
        global_steps = 0
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # overwrite the freshly initialised variables with the checkpoint
        saver.restore(sess, "./save/LSTM/save-60000")
        correct_count = 0
        evaled_count = 0
        try:
            # the reader raises OutOfRangeError after exactly one epoch
            while not coord.should_stop():
                input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
                probs = sess.run([model.probs], {model.input_data: input_data,
                                                 model.targets: targets})
                probs = np.array(probs).reshape([-1, config.vocab_size])
                # assumes each target row carries the label in column 0 —
                # TODO confirm against the reader's output format
                targets = np.array([t[0] for t in targets])
                output = np.argmax(probs, axis=1)
                correct_count += np.sum(output == targets)
                evaled_count += len(output)
        except tf.errors.OutOfRangeError:
            pass
        finally:
            # When done, ask the threads to stop.
            coord.request_stop()
            print("Accuracy: %f" % (float(correct_count) / evaled_count))
        coord.join(threads)
        sess.close()
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "fb18c5e15f361630f3c190b9cdd6569a",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 85,
"avg_line_length": 30.615384615384617,
"alnum_prop": 0.6168341708542714,
"repo_name": "jxwufan/AssociativeRetrieval",
"id": "8984fe7e06595e92dd92763b27f3daa815944676",
"size": "1592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LSTM_eval.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24808"
}
],
"symlink_target": ""
} |
__license__ = "Apache 2.0"
__copyright__ = "Copyright 2008 nb.io"
__author__ = "Randy Reddig - ydnar@nb.io"
from django.template import Library, Node
from django.conf import settings
from nbio.django.shortcuts import build_url
class UrlNode(Node):
    """Template node rendering an absolute URL for a host/path pair.

    The host may be overridden per-request via the
    ``X_OVERRIDE_SERVER_NAME`` header when ``allow_override`` is set.
    """

    def __init__(self, host=None, path='', allow_override=False):
        self.host = host
        self.allow_override = allow_override
        # normalise so the stored path is always rooted
        self.path = path if path.startswith('/') else '/' + path

    def render(self, context):
        request = context.get('request')
        override = None
        if self.allow_override and request:
            override = request.META.get('X_OVERRIDE_SERVER_NAME')
        # precedence: per-request override, configured host, app default
        chosen_host = override or self.host or settings.HOSTS['app']
        return build_url(request, host=chosen_host, path=self.path)
def static_url(parser, token):
    """Tag ``{% static_url path %}`` — URL on the static host, with
    ``settings.STATIC_PREFIX`` prepended when that setting exists."""
    path = ''
    try:
        _tag_name, path = token.split_contents()
        if hasattr(settings, 'STATIC_PREFIX'):
            path = settings.STATIC_PREFIX + path
    except ValueError:
        # tag used without exactly one argument: keep the empty path
        pass
    return UrlNode(host=settings.HOSTS['static'], path=path, allow_override=True)
def app_url(parser, token):
    """Tag ``{% app_url path %}`` — URL on the application host."""
    path = ''
    try:
        _tag_name, path = token.split_contents()
    except ValueError:
        # tag used without exactly one argument: keep the empty path
        pass
    return UrlNode(host=settings.HOSTS['app'], path=path, allow_override=True)
# Register the tags with Django's template system.
register = Library()
register.tag("static_url", static_url)
register.tag("app_url", app_url)
| {
"content_hash": "fb3298093537ee71d33656fe06a4318b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 86,
"avg_line_length": 27.82,
"alnum_prop": 0.6757728253055356,
"repo_name": "nbio/nbio-django",
"id": "930588c8ead4f29d95212cb156194027bdcbff2e",
"size": "1391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templatetags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10960"
}
],
"symlink_target": ""
} |
import io
import math
import os
import re
import shlex
import subprocess
from collections import deque
from threading import Thread
import eventlet
from eventlet.green.subprocess import Popen
from app import db, config, app
from app.library.formatters import duration_to_seconds
from app.models.file import File
from app.modules.mod_process.process_repository import ProcessRepository
# we need to monkey patch the threading module, see http://eventlet.net/doc/patching.html
eventlet.monkey_patch(thread=True)
# the pattern to fetch meta information of the current progress
# to match a line like
# frame=44448 fps= 14 q=-0.0 Lsize=  247192kB time=00:30:53.95 bitrate=1092.3kbits/s speed=0.577x
# FIX: the optional trailing group used to require a literal "P"
# ("(?:P speed=...)"), which can never occur in ffmpeg's output (see the
# sample line above); it now matches " speed=<factor>x" so group 7
# captures the encoding speed factor. Groups 1-6 are unchanged.
PROGRESS_PATTERN = re.compile(r"frame=\s*(\d+) fps=\s*(.+) q=(.+) L?size=\s*(\d+)kB time=(.+) bitrate=\s*(.+)kbits/s(?: speed=(.+)x)?")
class Process(Thread):
    """Worker thread that re-encodes one file with ffmpeg and reports
    progress, success or failure to the ProcessRepository."""

    def __init__(self, file):
        Thread.__init__(self)
        # file: the File database row describing the source video
        self.file = file
        # cleared by stop() to abort the running encode
        self.active = True

    def run(self):
        """
        run the encoding
        """
        # probe file first
        frame_count = self.ffmpeg_probe_frame_count()

        if frame_count == -1:
            app.logger.debug("Probing of " + self.file.filename + " failed - aborting...")
            ProcessRepository.file_failed(self.file)
            return

        split_path = os.path.split(self.file.filename)
        path = split_path[0]
        original_filename = split_path[1]
        filename_noext = os.path.split(os.path.splitext(original_filename)[0])[1]

        # form output filename and store it in self.file for later use
        self.file.output_filename = filename_noext + ".pyencode"

        cmd = ["ffmpeg"]
        cmd.extend(["-i", self.file.filename])
        # add parameters from config
        cmd.extend(shlex.split(config.get("encoding", "parameters")))
        cmd.extend(["-y", path + os.sep + self.file.output_filename])

        app.logger.debug("Starting encoding of " + self.file.filename + " with %s" % " ".join(cmd))

        # run_ffmpeg() yields progress dicts with return_code == -1 while
        # encoding is in flight; any other return_code signals that ffmpeg
        # exited with an error (on success nothing further is yielded).
        for info in self.run_ffmpeg(cmd, frame_count):
            if info["return_code"] != -1:
                app.logger.debug("Error occured while running ffmpeg. Last lines of output: ")
                app.logger.debug("\n".join(info["last_lines"]))
                ProcessRepository.file_failed(self.file)
                return

            # store information in database
            # convert kB to bytes
            info["ffmpeg_size"] *= 1024
            # we don't need the return_code anymore (and don't want to store it)
            info.pop("return_code")

            # update file in DB
            File.query.filter_by(id=self.file.id).update(info)
            db.session.commit()

            # update self.file
            for k in info:
                setattr(self.file, k, info[k])

            # tell ProcessRepository there's some progress going on
            ProcessRepository.file_progress(self.file)

        # only report success if the encode was not aborted via stop()
        if self.active:
            ProcessRepository.file_done(self.file)

    def ffmpeg_probe_frame_count(self):
        """
        probe self.file and return frame count
        """
        instance = Popen(["ffprobe", self.file.filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        output = ""
        for line in instance.stderr:
            output += line.decode("utf8")
            # call sleep, see https://stackoverflow.com/questions/34599578/using-popen-in-a-thread-blocks-every-incoming-flask-socketio-request
            eventlet.sleep()

        # NOTE(review): the alternation binds tighter than likely intended -
        # this matches "<number> fps" OR the bare literal "tbr" (no capture),
        # and re.findall() always returns a list (never None), so the guard
        # below can never fire; input with no fps match ends up calling
        # float("") and raising ValueError. TODO: confirm intended pattern.
        fps_reg = re.findall(r"([0-9]*\.?[0-9]+) fps|tbr", output)
        if fps_reg is None:
            return -1

        fps = float(" ".join(fps_reg))
        duration = duration_to_seconds(re.findall(r"Duration: (.*?),", output)[0])

        # calculate the amount of frames for the calculation of progress
        frame_count = int(math.ceil(duration * float(fps)))

        return frame_count

    def stop(self):
        """
        stop this process
        """
        self.active = False

    def run_ffmpeg(self, cmd, frame_count):
        """
        run ffmpeg with given cmd arguments and a frame count

        :param cmd: the command line dictionary containing all the arguments
        :param frame_count: the amount of frames of this video, necessary for the progress calculation
        :return:
        """
        instance = Popen(map(str, cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        reader = io.TextIOWrapper(instance.stderr, encoding="utf8")

        # these two variables are just needed for when the processing fails, see below
        last_lines = deque(maxlen=5)  # parameter determines how many lines to keep

        # oddly ffmpeg writes to stderr instead of stdout
        for line in reader:
            # kill ffmpeg when not being active anymore
            if not self.active:
                instance.kill()

            # call sleep, see https://stackoverflow.com/questions/34599578/using-popen-in-a-thread-blocks-every-incoming-flask-socketio-request
            eventlet.sleep()

            # append current line to last_lines
            last_lines.append(line)

            match = PROGRESS_PATTERN.match(line)

            # first few lines have no match
            if match:
                frame = int(match.group(1))  # current frame, needed for calculation of progress
                fps = float(match.group(2))  # needed for calculation of remaining time
                size = int(match.group(4))  # current size in kB
                time = duration_to_seconds(match.group(5))  # time already passed for converting, in seconds
                bitrate = float(match.group(6))  # in kbits/s

                progress = round((frame / float(frame_count)) * 100, 1)  # in %
                frames_remaining = frame_count - frame  # needed for eta
                eta = frames_remaining / fps if fps != 0 else -1  # in seconds

                # return_code == -1 marks an in-progress update
                yield {"return_code": -1, "ffmpeg_eta": eta, "ffmpeg_progress": progress, "ffmpeg_bitrate": bitrate,
                       "ffmpeg_time": time, "ffmpeg_size": size, "ffmpeg_fps": fps}

        return_code = instance.wait()

        # a final dict is only emitted on failure; a clean exit simply
        # ends the generator
        if return_code != 0:
            yield {"return_code": return_code, "last_lines": last_lines}
| {
"content_hash": "b209267e1d9de7ca9ce5d7c422a7a3e5",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 143,
"avg_line_length": 38.551515151515154,
"alnum_prop": 0.606980034585757,
"repo_name": "dhardtke/pyEncode",
"id": "58921bf8e4b81cfe349fe1c2738acc60afdb8ebc",
"size": "6426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/modules/mod_process/process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4488"
},
{
"name": "HTML",
"bytes": "29041"
},
{
"name": "JavaScript",
"bytes": "20857"
},
{
"name": "Python",
"bytes": "68936"
}
],
"symlink_target": ""
} |
from doxygen import DoxygenNode
from sphinx.util.compat import Directive
class concept(DoxygenNode):
    """Doxygen node marking a named concept anchor in the rendered output."""

    def __init__(self, name, inherits, **kwargs):
        super(concept, self).__init__(**kwargs)
        self.name = name
        self.inherits = inherits

    def render(self):
        # Emit only an HTML anchor; spaces in the concept name become
        # underscores so the anchor is a usable fragment identifier.
        anchor = self.name.replace(" ", "_")
        return '<a name="concept_%s"></a>' % anchor
class ConceptDirective(Directive):
    """Directive ``concept``: first content line is the unique concept
    name; an optional second line names a concept it inherits from."""

    has_content = True

    def run(self):
        # NOTE(review): `env` is looked up but never used; kept for
        # parity with the other directives in this module.
        env = self.state.document.settings.env
        # FIX: the bare `except:` clauses also swallowed SystemExit and
        # KeyboardInterrupt; indexing self.content only raises IndexError.
        try:
            name = self.content[0]
        except IndexError:
            raise BaseException("Please specify a unique name for the concept.")
        try:
            inherits = self.content[1]
        except IndexError:
            # the inherited concept is optional
            inherits = None
        return [concept(name, inherits)]
class link_concept(DoxygenNode):
    """Doxygen node tying a variable/function within a scope to a concept."""

    def __init__(self, variable, scope, name, **kwargs):
        super(link_concept, self).__init__(**kwargs)
        self.name = name
        self.scope = scope
        self.variable = variable

    def render(self):
        # Rendering is currently a no-op; the node only records the link.
        return ""
class LinkConceptDirective(Directive):
    """Directive ``link_concept``: content lines are, in order, a
    variable/function name, a scope, and a concept name — all required."""

    has_content = True

    def run(self):
        # NOTE(review): `env` is looked up but never used; kept for
        # parity with ConceptDirective.
        env = self.state.document.settings.env
        # FIX: the bare `except:` clauses also swallowed SystemExit and
        # KeyboardInterrupt; indexing self.content only raises IndexError.
        try:
            variable = self.content[0]
        except IndexError:
            raise BaseException("Please specify a variable/function name.")
        try:
            scope = self.content[1]
        except IndexError:
            raise BaseException("Please specify a scope.")
        try:
            name = self.content[2]
        except IndexError:
            raise BaseException("Please specify a concept name.")
        return [link_concept(variable, scope, name)]
| {
"content_hash": "7d09ce27a48364d762252470d21a3472",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 81,
"avg_line_length": 28.848484848484848,
"alnum_prop": 0.5955882352941176,
"repo_name": "troelsfr/Gasp",
"id": "de6a64e5483558a1bca8ce1d4112dd1c70259ce1",
"size": "1904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gasp/concept.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7484"
},
{
"name": "Python",
"bytes": "28597"
}
],
"symlink_target": ""
} |
"""
SPECSENS calculates the calibration curve given an observation, a standard star,
and the extinction curve for the site. The task assumes a 1-D spectrum that
has already been sensed from the original observations.
Author Version Date
-----------------------------------------------
S. M. Crawford (SAAO) 1.0 21 Mar 2011
TODO
----
LIMITATIONS
-----------
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import os
import sys
import time
import numpy as np
import pyfits
from matplotlib.pyplot import *
from pyraf import iraf
import saltstat
import saltsafekey as saltkey
import saltsafeio as saltio
from saltsafelog import logging
import spectools as st
from spectools import SALTSpecError
from PySpectrograph.Spectra import Spectrum
from saltfit import interfit
from pylab import *
debug = True
# -----------------------------------------------------------
# core routine
def specsens(specfile, outfile, stdfile, extfile, airmass=None, exptime=None,
             stdzp=3.68e-20, function='polynomial', order=3, thresh=3, niter=5,
             fitter='gaussian', clobber=True, logfile='salt.log', verbose=True):
    """Calculate the calibration (sensitivity) curve for an observation.

    Reads the observed 1-D spectrum (``specfile``), the standard-star
    spectrum in magnitudes (``stdfile``) and the site extinction curve
    (``extfile``), builds the ratio curve via ``sensfunc``, rejects
    outliers with a median-absolute-deviation cut, smooths the result with
    either a Gaussian process (``fitter='gaussian'``) or an iterative
    polynomial fit, and writes the curve to ``outfile`` in ascii format.

    Raises SALTSpecError when ``airmass`` or ``exptime`` is not supplied.
    NOTE(review): ``clobber`` and ``verbose`` are accepted but never used
    in this body - confirm whether they should gate file overwrite/logging.
    """
    with logging(logfile, debug) as log:
        # read in the specfile and create a spectrum object
        obs_spectra = st.readspectrum(specfile.strip(), error=True, ftype='ascii')
        # smooth the observed spectrum
        # read in the std file and convert from magnitudes to fnu
        # then convert it to fwave (ergs/s/cm2/A)
        std_spectra = st.readspectrum(stdfile.strip(), error=False, ftype='ascii')
        std_spectra.flux = Spectrum.magtoflux(std_spectra.flux, stdzp)
        std_spectra.flux = Spectrum.fnutofwave(
            std_spectra.wavelength, std_spectra.flux)
        # Get the typical bandpass of the standard star,
        std_bandpass = np.diff(std_spectra.wavelength).mean()
        # Smooth the observed spectrum to that bandpass
        obs_spectra.flux = st.boxcar_smooth(obs_spectra, std_bandpass)
        # read in the extinction file (leave in magnitudes)
        ext_spectra = st.readspectrum(extfile.strip(), error=False, ftype='ascii')
        # determine the airmass if not specified
        if saltio.checkfornone(airmass) is None:
            message = 'Airmass was not supplied'
            raise SALTSpecError(message)
        # determine the exptime if not specified
        if saltio.checkfornone(exptime) is None:
            message = 'Exposure Time was not supplied'
            raise SALTSpecError(message)
        # calculate the calibrated spectra
        log.message('Calculating the calibration curve for %s' % specfile)
        cal_spectra = sensfunc(
            obs_spectra, std_spectra, ext_spectra, airmass, exptime)
        # plot(cal_spectra.wavelength, cal_spectra.flux * std_spectra.flux)
        # fit the spectra--first take a first cut of the spectra
        # using the median absolute deviation to throw away bad points
        cmed = np.median(cal_spectra.flux)
        cmad = saltstat.mad(cal_spectra.flux)
        # keep points within thresh*MAD of the median and with positive flux
        mask = (abs(cal_spectra.flux - cmed) < thresh * cmad)
        mask = np.logical_and(mask, (cal_spectra.flux > 0))
        # now fit the data
        # Fit using a gaussian process.
        if fitter=='gaussian':
            # NOTE(review): sklearn.gaussian_process.GaussianProcess is a
            # legacy API - confirm the installed sklearn still provides it.
            from sklearn.gaussian_process import GaussianProcess
            #Instanciate a Gaussian Process model
            # per-point noise estimate propagated from the observed variance
            dy = obs_spectra.var[mask] ** 0.5
            dy /= obs_spectra.flux[mask] / cal_spectra.flux[mask]
            y = cal_spectra.flux[mask]
            gp = GaussianProcess(corr='squared_exponential', theta0=1e-2,
                                 thetaL=1e-4, thetaU=0.1, nugget=(dy / y) ** 2.0)
            X = np.atleast_2d(cal_spectra.wavelength[mask]).T
            # Fit to data using Maximum Likelihood Estimation of the parameters
            gp.fit(X, y)
            x = np.atleast_2d(cal_spectra.wavelength).T
            # Make the prediction on the meshed x-axis (ask for MSE as well)
            y_pred = gp.predict(x)
            cal_spectra.flux = y_pred
        else:
            # iterative rejection fit with the requested function/order
            fit=interfit(cal_spectra.wavelength[mask], cal_spectra.flux[mask], function=function, order=order, thresh=thresh, niter=niter)
            fit.interfit()
            cal_spectra.flux=fit(cal_spectra.wavelength)
        # write the spectra out
        st.writespectrum(cal_spectra, outfile, ftype='ascii')
def sensfunc(obs_spectra, std_spectra, ext_spectra, airmass, exptime):
    """Compute the sensitivity (calibration) curve for an observation.

    All spectra are interpolated onto the wavelength grid of
    ``obs_spectra``; the returned curve is

        C = F_obs / 10**(-0.4 * A * E) / T / dW / F_std

    where F_obs is the observed flux (counts/A), F_std the known
    standard-star flux (ergs/s/cm2/A), A the airmass, E the extinction in
    magnitudes, T the exposure time and dW the mean bandpass.

    Parameters
    -----------
    obs_spectra--spectrum of the observed star (counts/A)
    std_spectra--know spectrum of the standard star (ergs/s/cm2/A)
    ext_spectra--spectrum of the extinction curve (in mags)
    airmass--airmass of the observations
    exptime--exposure time of the observations
    """
    # Resample the standard-star and extinction spectra onto the
    # observed wavelength grid.
    std_spectra.interp(obs_spectra.wavelength)
    ext_spectra.interp(obs_spectra.wavelength)

    # Start from a copy of the observed flux.
    cal_spectra = Spectrum.Spectrum(
        obs_spectra.wavelength, obs_spectra.flux.copy(), stype='continuum')

    # Mean wavelength step of the observation (the bandpass dW).
    bandpass = np.diff(obs_spectra.wavelength).mean()

    # Remove atmospheric extinction, then normalise by exposure time,
    # bandpass and the known standard-star flux.
    extinction_factor = 10 ** (-0.4 * airmass * ext_spectra.flux)
    cal_spectra.flux = (
        cal_spectra.flux / extinction_factor / exptime / bandpass / std_spectra.flux
    )
    return cal_spectra
# main code
# Register specsens as a PyRAF task using its CL parameter file so it can
# be invoked from the iraf/saltspec environment.
parfile = iraf.osfn("saltspec$specsens.par")
t = iraf.IrafTaskFactory(
    taskname="specsens", value=parfile, function=specsens, pkgname='saltspec')
| {
"content_hash": "a44826ec016910f7ef8ca32127035f31",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 138,
"avg_line_length": 36.62721893491124,
"alnum_prop": 0.6516962843295638,
"repo_name": "saltastro/pysalt",
"id": "497cd8ead6b7e020dfcb3c2d86a18526fe891282",
"size": "6366",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "saltspec/specsens.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9334"
},
{
"name": "Common Lisp",
"bytes": "19932"
},
{
"name": "Makefile",
"bytes": "856"
},
{
"name": "Python",
"bytes": "1359528"
},
{
"name": "Smalltalk",
"bytes": "271"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add an integer ``score`` field (default 0) to ``contestproblem``."""

    # Must run after the migration that removed the ``difficulty`` field.
    dependencies = [
        ('contest', '0004_remove_contestproblem_difficulty'),
    ]

    operations = [
        migrations.AddField(
            model_name='contestproblem',
            name='score',
            field=models.IntegerField(default=0),
        ),
    ]
| {
"content_hash": "f0536aa8eeb55659313f63260e880a86",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 22,
"alnum_prop": 0.6111111111111112,
"repo_name": "Timeship/OnlineJudge-1",
"id": "e0a025dc0e1582222ee61cdef862a2bccae30ffb",
"size": "420",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "contest/migrations/0005_contestproblem_score.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46937"
},
{
"name": "HTML",
"bytes": "151050"
},
{
"name": "JavaScript",
"bytes": "119114"
},
{
"name": "Python",
"bytes": "312924"
},
{
"name": "Shell",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""Offer time listening automation rules."""
from datetime import datetime
from functools import partial
import voluptuous as vol
from homeassistant.components import sensor
from homeassistant.const import (
ATTR_DEVICE_CLASS,
CONF_AT,
CONF_PLATFORM,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import HassJob, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import (
async_track_point_in_time,
async_track_state_change_event,
async_track_time_change,
)
import homeassistant.util.dt as dt_util
# mypy: allow-untyped-defs, no-check-untyped-defs
# Accept either a literal time ("HH:MM" / "HH:MM:SS") or an entity id from
# the input_datetime or sensor domains.
_TIME_TRIGGER_SCHEMA = vol.Any(
    cv.time,
    vol.All(str, cv.entity_domain(("input_datetime", "sensor"))),
    msg="Expected HH:MM, HH:MM:SS or Entity ID with domain 'input_datetime' or 'sensor'",
)

# Trigger configuration: platform "time" plus one or more "at" entries.
TRIGGER_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_PLATFORM): "time",
        vol.Required(CONF_AT): vol.All(cv.ensure_list, [_TIME_TRIGGER_SCHEMA]),
    }
)
async def async_attach_trigger(hass, config, action, automation_info):
    """Listen for state changes based on configuration.

    Returns a callback that detaches every listener set up here.
    """
    trigger_id = automation_info.get("trigger_id") if automation_info else None
    # entity_id -> unsubscribe callback for that entity's current listener
    entities = {}
    # unsubscribe callbacks for the static time/state trackers
    removes = []
    job = HassJob(action)

    @callback
    def time_automation_listener(description, now, *, entity_id=None):
        """Listen for time changes and calls action."""
        hass.async_run_hass_job(
            job,
            {
                "trigger": {
                    "platform": "time",
                    "now": now,
                    "description": description,
                    "entity_id": entity_id,
                    "id": trigger_id,
                }
            },
        )

    @callback
    def update_entity_trigger_event(event):
        """update_entity_trigger from the event."""
        return update_entity_trigger(event.data["entity_id"], event.data["new_state"])

    @callback
    def update_entity_trigger(entity_id, new_state=None):
        """Update the entity trigger for the entity_id.

        Replaces any listener previously registered for this entity with one
        matching the entity's new state (or none if the state is unusable).
        """
        # If a listener was already set up for entity, remove it.
        remove = entities.pop(entity_id, None)
        if remove:
            remove()
            remove = None

        # Entity removed or unknown: nothing left to track.
        if not new_state:
            return

        # Check state of entity. If valid, set up a listener.
        if new_state.domain == "input_datetime":
            has_date = new_state.attributes["has_date"]
            if has_date:
                year = new_state.attributes["year"]
                month = new_state.attributes["month"]
                day = new_state.attributes["day"]
            has_time = new_state.attributes["has_time"]
            if has_time:
                hour = new_state.attributes["hour"]
                minute = new_state.attributes["minute"]
                second = new_state.attributes["second"]
            else:
                # If no time then use midnight.
                hour = minute = second = 0

            if has_date:
                # If input_datetime has date, then track point in time.
                # NOTE(review): .localize() is the pytz-style API - confirm
                # DEFAULT_TIME_ZONE is a pytz timezone in this HA version.
                trigger_dt = dt_util.DEFAULT_TIME_ZONE.localize(
                    datetime(year, month, day, hour, minute, second)
                )
                # Only set up listener if time is now or in the future.
                if trigger_dt >= dt_util.now():
                    remove = async_track_point_in_time(
                        hass,
                        partial(
                            time_automation_listener,
                            f"time set in {entity_id}",
                            entity_id=entity_id,
                        ),
                        trigger_dt,
                    )
            elif has_time:
                # Else if it has time, then track time change.
                remove = async_track_time_change(
                    hass,
                    partial(
                        time_automation_listener,
                        f"time set in {entity_id}",
                        entity_id=entity_id,
                    ),
                    hour=hour,
                    minute=minute,
                    second=second,
                )
        elif (
            new_state.domain == "sensor"
            and new_state.attributes.get(ATTR_DEVICE_CLASS)
            == sensor.DEVICE_CLASS_TIMESTAMP
            and new_state.state not in (STATE_UNAVAILABLE, STATE_UNKNOWN)
        ):
            # Timestamp sensors fire once at the moment they point to.
            trigger_dt = dt_util.parse_datetime(new_state.state)

            if trigger_dt is not None and trigger_dt > dt_util.utcnow():
                remove = async_track_point_in_time(
                    hass,
                    partial(
                        time_automation_listener,
                        f"time set in {entity_id}",
                        entity_id=entity_id,
                    ),
                    trigger_dt,
                )

        # Was a listener set up?
        if remove:
            entities[entity_id] = remove

    to_track = []

    for at_time in config[CONF_AT]:
        if isinstance(at_time, str):
            # entity
            to_track.append(at_time)
            update_entity_trigger(at_time, new_state=hass.states.get(at_time))
        else:
            # datetime.time
            removes.append(
                async_track_time_change(
                    hass,
                    partial(time_automation_listener, "time"),
                    hour=at_time.hour,
                    minute=at_time.minute,
                    second=at_time.second,
                )
            )

    # Track state changes of any entities.
    removes.append(
        async_track_state_change_event(hass, to_track, update_entity_trigger_event)
    )

    @callback
    def remove_track_time_changes():
        """Remove tracked time changes."""
        for remove in entities.values():
            remove()
        for remove in removes:
            remove()

    return remove_track_time_changes
| {
"content_hash": "13538463374955484536f44868265313",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 89,
"avg_line_length": 33.56666666666667,
"alnum_prop": 0.519364448857994,
"repo_name": "adrienbrault/home-assistant",
"id": "69f01672078cc2c408285d1a4994e89c87a18d3b",
"size": "6042",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homeassistant/triggers/time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Validator for the ``heatmapgl.colorscale`` attribute."""

    def __init__(self, plotly_name="colorscale", parent_name="heatmapgl", **kwargs):
        # Defaults below may be overridden by the caller through **kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {"autocolorscale": False})
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
| {
"content_hash": "44d4ddd0ad115e7d3c83d89ba81b93cf",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 84,
"avg_line_length": 41.166666666666664,
"alnum_prop": 0.6356275303643725,
"repo_name": "plotly/plotly.py",
"id": "3574f6667bed070522574ea1f9ac1ef3227c0fb8",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/heatmapgl/_colorscale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import glob
sys.path.append("..")
sys.path.append("../data_sequence")
sys.path.append("../helpers")
from detections_pb2 import Detections, Detection
from data_sequence import DataSequence
import os, os.path
from optparse import OptionParser
def open_data_sequence(data_filepath):
    """Open a detections data sequence and return a generator over its records.

    :param data_filepath: path to a DataSequence file containing Detections
    :return: generator yielding one Detections message at a time
    """
    assert os.path.exists(data_filepath)
    the_data_sequence = DataSequence(data_filepath, Detections)

    def data_sequence_reader(data_sequence):
        # Yield records until the sequence is exhausted.
        while True:
            data = data_sequence.read()
            if data is None:
                # PEP 479: 'raise StopIteration' inside a generator becomes
                # a RuntimeError on Python 3.7+; a plain return ends the
                # generator cleanly instead.
                return
            yield data

    return data_sequence_reader(the_data_sequence)
def parse_arguments():
    """Parse and validate the command-line options; exit on invalid input."""
    parser = OptionParser()
    parser.description = \
        "This program takes a detections.data_sequence created by ./objects_detection and converts it into the Caltech dataset evaluation format"

    parser.add_option("-i", "--input", dest="input_path",
                      metavar="FILE", type="string",
                      help="path to the folder containing the recordings")
    parser.add_option("-o", "--output", dest="output_path",
                      type="string",
                      help="path to a directory where the curves are saved")
    options, _args = parser.parse_args()

    # Guard clauses: parser.error() prints the message and exits.
    if not options.input_path:
        parser.error("'input' option is required to run this program")
    elif not os.path.exists(options.input_path):
        parser.error("Could not find the input file")
    if not options.output_path:
        parser.error("'output_path' option is required to run this program")

    return options
def getDetections(detections_sequence):
    """Collect (label, score) pairs from a detections sequence.

    Images whose name starts with "neg" are negatives (label -1), all
    others positives (label +1).  Only pedestrian detections are kept.

    :return: [detections sorted by descending score, #positives, #negatives]
    :raises Exception: if any image carries more than one detection
    """
    collected = []
    num_negatives = 0
    num_positives = 0
    scores_by_image = {}
    for detections in detections_sequence:
        if detections.image_name.startswith("neg"):
            label = -1
            num_negatives += 1
        else:
            label = 1
            num_positives += 1
        if (len(detections.detections)) > 1:
            raise Exception("more than one detection per image")
        for detection in detections.detections:
            # Only pedestrian detections take part in the evaluation.
            if detection.object_class != Detection.Pedestrian:
                continue
            scores_by_image[detections.image_name] = detection.score
            collected.append([label, detection.score])
    # Highest-scoring detections first (stable sort, as before).
    collected.sort(key=lambda entry: entry[1], reverse=True)
    return [collected, num_positives, num_negatives]
def saveCurve(detections, posCount, negCount, output_file):
    """Write a miss-rate vs FPPW curve to ``output_file``.

    ``detections`` must be a list of [label, score] pairs sorted by
    descending score, where label is +1 for positives and -1 for
    negatives.  For each prefix of the list one "missrate fppw" line is
    written.

    :param detections: sorted [label, score] pairs
    :param posCount: total number of positive images (must be > 0)
    :param negCount: total number of negative images (unused, kept for API)
    :param output_file: path of the text file to create
    """
    allDet = len(detections)
    tp = 0
    fp = 0
    lines = []
    for det in detections:
        if det[0] == 1:
            tp += 1
        else:
            fp += 1
        fn = posCount - tp
        # Fraction of positives missed so far, and false positives per window.
        # (The original also computed precision/recall here but never used them.)
        missrate = fn / float(posCount)
        fppw = float(fp) / allDet
        lines.append(str(missrate) + " " + str(fppw) + "\n")
    # 'with' guarantees the handle is closed even if writing fails
    # (the original open()/close() pair leaked the handle on error).
    with open(output_file, "w") as f:
        f.writelines(lines)
def detections_to_precision_recall(input_path, output_file):
    """Convert a detections data sequence into a miss-rate/FPPW curve file."""
    # Stream the recorded detections and reduce them to the Caltech format.
    detections_sequence = open_data_sequence(input_path)
    detections, posCount, negCount = getDetections(detections_sequence)
    saveCurve(detections, posCount, negCount, output_file)
def getFolders(path):
    """Return the sorted list of immediate sub-directories of ``path``."""
    entries = glob.glob(os.path.join(path, "*"))
    return sorted(entry for entry in entries if os.path.isdir(entry))
def main():
    """Convert every recording folder inside the input directory."""
    options = parse_arguments()
    # The original advanced a counter by 4 per folder (0, 4, 8, ...);
    # index * 4 produces the same sequence of file names.
    for index, folder in enumerate(getFolders(options.input_path)):
        counter = index * 4
        data_sequence = os.path.join(folder, "detections.data_sequence")
        output_file = os.path.join(options.output_path, "crop_%03d.txt" % (counter))
        detections_to_precision_recall(data_sequence, output_file)


if __name__ == "__main__":
    main()
| {
"content_hash": "8e743c5a835a6aeb0924a5e6799275e2",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 145,
"avg_line_length": 25.011904761904763,
"alnum_prop": 0.6454069490718706,
"repo_name": "LevinJ/Pedestrian-detection-and-tracking",
"id": "d85fb67ae586e2514491d2ab055ddaa38cd515bb",
"size": "4249",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/doppia/tools/objects_detection/detections_to_precision_recall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "10112"
},
{
"name": "C",
"bytes": "9241106"
},
{
"name": "C++",
"bytes": "17667301"
},
{
"name": "Cuda",
"bytes": "268238"
},
{
"name": "M",
"bytes": "2374"
},
{
"name": "Makefile",
"bytes": "2488"
},
{
"name": "Matlab",
"bytes": "55886"
},
{
"name": "Objective-C",
"bytes": "176"
},
{
"name": "Perl",
"bytes": "1417"
},
{
"name": "Python",
"bytes": "482759"
},
{
"name": "Shell",
"bytes": "2863"
},
{
"name": "Tcl",
"bytes": "1739"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# Fail fast: the sites framework is a hard dependency of forms_builder.
if not ("django.contrib.sites" in settings.INSTALLED_APPS):
    raise ImproperlyConfigured("django.contrib.sites is required")

# Each setting below reads an optional FORMS_BUILDER_* override from the
# Django settings module and falls back to the documented default.

# The maximum allowed length for field values.
FIELD_MAX_LENGTH = getattr(settings, "FORMS_BUILDER_FIELD_MAX_LENGTH", 2000)

# The maximum allowed length for field labels.
LABEL_MAX_LENGTH = getattr(settings, "FORMS_BUILDER_LABEL_MAX_LENGTH", 200)

# Sequence of custom fields that will be added to the form field types.
EXTRA_FIELDS = getattr(settings, "FORMS_BUILDER_EXTRA_FIELDS", ())

# Sequence of custom widgets that will add/update form fields widgets.
EXTRA_WIDGETS = getattr(settings, "FORMS_BUILDER_EXTRA_WIDGETS", ())

# The absolute path where files will be uploaded to.
UPLOAD_ROOT = getattr(settings, "FORMS_BUILDER_UPLOAD_ROOT", None)

# Boolean controlling whether HTML5 form fields are used.
USE_HTML5 = getattr(settings, "FORMS_BUILDER_USE_HTML5", True)

# Boolean controlling whether forms are associated to Django's Sites
# framework; defaults to True only when a SITE_ID is configured.
USE_SITES = getattr(settings, "FORMS_BUILDER_USE_SITES",
                    hasattr(settings, "SITE_ID"))

# Boolean controlling whether form slugs are editable in the admin.
EDITABLE_SLUGS = getattr(settings, "FORMS_BUILDER_EDITABLE_SLUGS", False)

# Char to start a quoted choice with.
CHOICES_QUOTE = getattr(settings, "FORMS_BUILDER_CHOICES_QUOTE", "`")

# Char to end a quoted choice with.
CHOICES_UNQUOTE = getattr(settings, "FORMS_BUILDER_CHOICES_UNQUOTE", "`")

# Char to use as a field delimiter when exporting form responses as CSV.
CSV_DELIMITER = getattr(settings, "FORMS_BUILDER_CSV_DELIMITER", ",")

# The maximum allowed length for field help text.
HELPTEXT_MAX_LENGTH = getattr(settings, "FORMS_BUILDER_HELPTEXT_MAX_LENGTH", 100)

# The maximum allowed length for field choices.
CHOICES_MAX_LENGTH = getattr(settings, "FORMS_BUILDER_CHOICES_MAX_LENGTH", 1000)

# Does sending emails fail silently or raise an exception.
# Defaults to settings.DEBUG when no explicit override is given.
EMAIL_FAIL_SILENTLY = getattr(settings, "FORMS_BUILDER_EMAIL_FAIL_SILENTLY",
                              settings.DEBUG)

# Django SITE_ID - need a default since no longer provided in settings.py.
SITE_ID = getattr(settings, "SITE_ID", 1)
| {
"content_hash": "e69deea19af657e0800db49281510b11",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 81,
"avg_line_length": 40.607142857142854,
"alnum_prop": 0.7559366754617414,
"repo_name": "nimbis/django-forms-builder",
"id": "7a16ba3fd224c0d59182fb0cfea2a15d54185f3e",
"size": "2274",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "forms_builder/forms/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "9383"
},
{
"name": "Python",
"bytes": "81129"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('waldur_rancher', '0012_cluster_initial_data'),
]
operations = [
migrations.RemoveField(model_name='cluster', name='initial_data',),
]
| {
"content_hash": "67ab196456d3a21db8a03aa24cb7cfc9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 75,
"avg_line_length": 21.75,
"alnum_prop": 0.6513409961685823,
"repo_name": "opennode/waldur-mastermind",
"id": "bc59c1e8191e721c4a9de5c2e85f6fe3688b29b1",
"size": "310",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_rancher/migrations/0013_remove_cluster_initial_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4429"
},
{
"name": "Dockerfile",
"bytes": "6258"
},
{
"name": "HTML",
"bytes": "42329"
},
{
"name": "JavaScript",
"bytes": "729"
},
{
"name": "Python",
"bytes": "5520019"
},
{
"name": "Shell",
"bytes": "15429"
}
],
"symlink_target": ""
} |
"""Jarvis
Multi-purpose processing framework with frequently used utilities.
"""
# Standard package metadata consumed by tooling and ``help()``.
__author__ = ["Alexandru Coman", "Cosmin Poieana"]
__copyright__ = "Copyright (c) 2014 RoPython"
__credits__ = []
__version__ = "0.1a"
__maintainer__ = ["Alexandru Coman", "Cosmin Poieana"]
__email__ = ["alex@ropython.org", "cmin@ropython.org"]
__status__ = "Development"
| {
"content_hash": "94d81ce9ff503893665011a65a9de59b",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 66,
"avg_line_length": 29.583333333333332,
"alnum_prop": 0.6591549295774648,
"repo_name": "RoPython/jarvis",
"id": "9378b0a7333a41e11f41f6d0cdcffcacf79fb0c4",
"size": "355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jarvis/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35309"
},
{
"name": "Shell",
"bytes": "6468"
}
],
"symlink_target": ""
} |
import os
import pickle
import copy
import numpy as np
# Special vocabulary tokens (padding, end-of-sequence, unknown, decoder
# start) mapped to fixed low ids; real words are numbered after these.
CODES = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, '<GO>': 3 }
def load_data(path):
    """
    Read the whole UTF-8 text file at ``path`` and return its contents.
    """
    input_file = os.path.join(path)
    with open(input_file, 'r', encoding='utf-8') as text_file:
        return text_file.read()
def preprocess_and_save_data(source_path, target_path, text_to_ids):
    """
    Preprocess source/target text pairs and pickle them to ``preprocess.p``.
    """
    # Load and lower-case both corpora.
    source_text = load_data(source_path)
    target_text = load_data(target_path)
    source_text = source_text.lower()
    target_text = target_text.lower()

    # Build vocabularies, then convert the raw text to id sequences.
    source_vocab_to_int, source_int_to_vocab = create_lookup_tables(source_text)
    target_vocab_to_int, target_int_to_vocab = create_lookup_tables(target_text)
    source_text, target_text = text_to_ids(
        source_text, target_text, source_vocab_to_int, target_vocab_to_int)

    payload = (
        (source_text, target_text),
        (source_vocab_to_int, target_vocab_to_int),
        (source_int_to_vocab, target_int_to_vocab),
    )
    with open('preprocess.p', 'wb') as out_file:
        pickle.dump(payload, out_file)
def load_preprocess():
    """
    Load the preprocessed training data from ``preprocess.p``.

    Returns the tuple saved by ``preprocess_and_save_data``:
    ((source_text, target_text),
     (source_vocab_to_int, target_vocab_to_int),
     (source_int_to_vocab, target_int_to_vocab)).
    """
    with open('preprocess.p', mode='rb') as in_file:
        return pickle.load(in_file)
def create_lookup_tables(text):
    """
    Build word->id and id->word lookup tables for the words in ``text``.

    The special CODES tokens keep their fixed ids; every distinct word is
    assigned the next free integer id.
    """
    vocab_to_int = copy.copy(CODES)
    next_id = len(CODES)
    for word in set(text.split()):
        vocab_to_int[word] = next_id
        next_id += 1
    int_to_vocab = {index: word for word, index in vocab_to_int.items()}
    return vocab_to_int, int_to_vocab
def save_params(params):
    """
    Pickle ``params`` to the file ``params.p`` in the working directory.
    """
    with open('params.p', 'wb') as handle:
        pickle.dump(params, handle)
def load_params():
    """
    Load and return the object pickled in ``params.p``.
    """
    with open('params.p', mode='rb') as handle:
        return pickle.load(handle)
def batch_data(source, target, batch_size):
    """
    Yield padded (source, target) batches of ``batch_size`` sentences.

    Trailing sentences that do not fill a complete batch are dropped.
    """
    num_batches = len(source) // batch_size
    for batch_index in range(num_batches):
        begin = batch_index * batch_size
        end = begin + batch_size
        yield (np.array(pad_sentence_batch(source[begin:end])),
               np.array(pad_sentence_batch(target[begin:end])))
def pad_sentence_batch(sentence_batch):
    """
    Pad every sentence in the batch with the <PAD> id so that all
    sentences share the length of the longest one.

    :param sentence_batch: list of sentences (lists of int ids)
    :return: list of equal-length sentences
    """
    # default=0 makes an empty batch return [] instead of raising
    # ValueError from max() on an empty sequence.
    max_sentence = max((len(sentence) for sentence in sentence_batch), default=0)
    return [sentence + [CODES['<PAD>']] * (max_sentence - len(sentence))
            for sentence in sentence_batch]
| {
"content_hash": "7e30b7660a561f3ba38f72a432b166c2",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 110,
"avg_line_length": 28.1010101010101,
"alnum_prop": 0.5970524802300503,
"repo_name": "d-k-b/udacity-deep-learning",
"id": "4ec7c43fd617d664cf4f2ecae84d68b0f2a5a886",
"size": "2782",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "language-translation/helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1911595"
},
{
"name": "Jupyter Notebook",
"bytes": "11835803"
},
{
"name": "Python",
"bytes": "96145"
}
],
"symlink_target": ""
} |
import string
import os
import sys
# Directory containing this script; the via source tree is its parent.
DIST_PACK_DIR = os.path.dirname(os.path.realpath(__file__))
VIA_SRC_DIR = os.path.join(DIST_PACK_DIR, '..')
# Template HTML with <script>/<link> references, and the packed output file.
SOURCE_HTML = os.path.join(VIA_SRC_DIR, 'src', 'index.html')
OUT_HTML = os.path.join(VIA_SRC_DIR, 'dist', 'via.html')
def get_src_file_contents(filename):
    """Return the contents of ``filename`` resolved against the src/ folder."""
    full_filename = os.path.join(VIA_SRC_DIR, 'src', filename)
    with open(full_filename) as src_file:
        return src_file.read()
# Build dist/via.html by inlining every referenced script and stylesheet
# from src/index.html into a single self-contained HTML file.
with open(OUT_HTML, 'w') as outf:
    with open(SOURCE_HTML, 'r') as inf:
        for line in inf:
            if '<script src="' in line:
                # <script src="FILE"> -> inline the JS file.
                # After split('"') the src value sits at index 1.
                tok = line.split('"')
                filename = tok[1]
                outf.write('<!-- START: Contents of file: ' + filename + '-->\n')
                outf.write('<script>\n')
                outf.write( get_src_file_contents(filename) )
                outf.write('</script>\n')
                outf.write('<!-- END: Contents of file: ' + filename + '-->\n')
            else:
                if '<link rel="stylesheet" type="text/css"' in line:
                    # <link ... href="FILE"> -> inline the CSS file.
                    # After split('"') the href value sits at index 5.
                    tok = line.split('"')
                    filename = tok[5]
                    outf.write('<!-- START: Contents of file: ' + filename + '-->\n')
                    outf.write('<style>\n')
                    outf.write( get_src_file_contents(filename) )
                    outf.write('</style>\n')
                    outf.write('<!-- END: Contents of file: ' + filename + '-->\n')
                else:
                    parsedline = line
                    if "//__ENABLED_BY_PACK_SCRIPT__" in line:
                        # Un-comment code meant to run only in the packed build.
                        parsedline = line.replace('//__ENABLED_BY_PACK_SCRIPT__', '');
                    outf.write(parsedline)

print("Written packed file to: " + OUT_HTML)
| {
"content_hash": "1f5b39959af91651be840effdbf7fbe8",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 75,
"avg_line_length": 36.26190476190476,
"alnum_prop": 0.5633617859487853,
"repo_name": "ox-vgg/via",
"id": "08ab4ec927d3a797df6fab487256cc6cc752137e",
"size": "1635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "via-2.x.y/scripts/pack_via.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "3652"
},
{
"name": "CSS",
"bytes": "67802"
},
{
"name": "HTML",
"bytes": "1229911"
},
{
"name": "JavaScript",
"bytes": "2928237"
},
{
"name": "MATLAB",
"bytes": "5737"
},
{
"name": "Python",
"bytes": "22787"
},
{
"name": "Shell",
"bytes": "2529"
}
],
"symlink_target": ""
} |
import argparse
import datetime
import json
import os
import math
import random
import re
import shutil
import sys
import subprocess
import tempfile
class FileInfo(object):
    """Collects path and size metadata for a single upload candidate.

    On construction the source file/directory is renamed in place so its
    base name contains no quote characters; derived artifact paths
    (nfo, rar, par, nzb) are exposed as properties.
    """

    # Bytes per (decimal) megabyte; os.stat().st_size reports bytes.
    mb = 1000000

    @staticmethod
    def normalize_name(name):
        """
        :param name: a file name
        :return: name that is clean of non ascii characters.
        """
        # Remove leading and trailing spaces
        name = name.strip()
        # Replace any character which is not in the set 0-9a-zA-Z.- with a dot '.'
        name = re.sub("[^0-9a-zA-Z.-]", ".", name)
        # Collapse '.-.' into a single '-'
        name = name.replace(".-.", "-")
        # Remove any number of dots from the beginning of the name
        name = re.sub("^\\.+", "", name)
        # Replace 2 or more subsequent dots with one dot
        normalized_file_name = re.sub("\\.{2,}", ".", name)
        return normalized_file_name

    def __init__(self, source_path, working_path, media_info_path):
        """
        :param source_path: absolute path to file/directory to work on
        :param working_path: path to place temporary files such as rar
        :param media_info_path: path to place permanent files like nzb
        """
        # Strip quote characters from the base name and rename the source
        # on disk accordingly (side effect!), so later shell commands that
        # single-quote the path cannot break.
        base_name = os.path.basename(source_path)
        base_name = base_name.replace("'", "").replace('"', '')
        origin_path = os.path.join(os.path.dirname(source_path), base_name)
        os.rename(src=source_path, dst=origin_path)
        is_file = os.path.isfile(origin_path)
        if is_file:
            file_name, file_extension = os.path.splitext(base_name)
            size_mb = int(os.stat(origin_path).st_size / self.mb)
        else:
            # Directory: no extension; size is the sum of its direct
            # (non-recursive) file children.
            file_name, file_extension = base_name, ""
            size_mb = sum(
                int(os.stat(os.path.join(origin_path, f)).st_size / self.mb)
                for f in os.listdir(origin_path)
                if os.path.isfile(os.path.join(origin_path, f))
            )
        self.source_path = source_path        # original path as given
        self.origin_path = origin_path        # quote-free renamed path
        self.working_path = working_path      # temp dir for rar/par output
        self.media_info_path = media_info_path  # dir for nfo/nzb output
        self.file_name = file_name
        self.file_extension = file_extension
        self.is_file = is_file
        self.size_mb = size_mb
        self.normalized_file_name = FileInfo.normalize_name(file_name)

    @property
    def parent_path(self):
        """Directory containing the (renamed) source."""
        return os.path.dirname(self.origin_path)

    @property
    def is_media_file(self):
        """True for the video container extensions handled by this tool."""
        return self.file_extension in (".avi", ".mkv", ".mp4", ".mpg")

    @property
    def nfo_file_path(self):
        """Destination of the mediainfo report."""
        return os.path.join(self.media_info_path, self.normalized_file_name + ".nfo")

    @property
    def rar_file_path(self):
        """Base name for the rar volume set."""
        return os.path.join(self.working_path, self.normalized_file_name + ".rar")

    @property
    def rar_files_path_wildcard(self):
        """Glob matching every generated rar volume."""
        return os.path.join(self.working_path, self.normalized_file_name + ".*.rar")

    @property
    def par_file_path(self):
        """Base name for the par2 recovery files."""
        return os.path.join(self.working_path, self.normalized_file_name + ".par")

    @property
    def nzb_file_path(self):
        """Destination of the nzb produced by the uploader."""
        return os.path.join(self.media_info_path, self.normalized_file_name + ".nzb")

    def __str__(self):
        fields = ("{} = '{}'".format(k, v) for k, v in self.__dict__.items())
        return "FileInfo[\n{}\n]".format(",\n".join(fields))
"""
Globals - populated in def setup
"""
SERVERS = []
BASE_DIR = ""
WATCH_DIR = ""
MEDIA_INFO_DIR = ""
RAM_GB = 2
DELETE_SOURCE = False
HEB_LIST = []
PUBLISH_API_KEY = ""
RSYNC_DIR = "dnzb:/temp/" + datetime.datetime.today().strftime('%Y-%m-%d')
def generate_media_file(fi: FileInfo):
    """Create a mediainfo .nfo report for ``fi`` and strip absolute paths."""
    command = "mediainfo --LogFile={} '{}'".format(
        fi.nfo_file_path,
        fi.origin_path,
    )
    print("Running '{}'".format(command))
    # NOTE(review): shell=True with an interpolated path relies on quotes
    # having been stripped in FileInfo.__init__ - confirm inputs stay safe.
    subprocess.run(command, shell=True, check=True, stdout=subprocess.DEVNULL)

    # Rewrite the report so it references file names only, not absolute paths.
    with open(fi.nfo_file_path, "r") as nfo:
        report = nfo.read()
    report = report.replace(fi.parent_path + "/", "")
    with open(fi.nfo_file_path, "w") as nfo:
        nfo.write(report)
    print("created media file in - ", fi.nfo_file_path)
def generate_rar_files(fi: FileInfo):
    """Split ``fi`` into multi-volume RAR archives (store mode, no compression).

    The volume size scales with the total size so the upload gets a
    reasonable number of parts.
    """
    # Pick the volume size by total size.  The original used strict open
    # intervals (0 < size < 700, ...), so sizes of exactly 0, 700, 3000 or
    # 5000 MB silently fell through to the 200M default; this chain is
    # contiguous.
    if fi.size_mb < 700:
        rar_vol_size_mb = "20M"
    elif fi.size_mb < 3000:
        rar_vol_size_mb = "50M"
    elif fi.size_mb < 5000:
        rar_vol_size_mb = "100M"
    else:
        rar_vol_size_mb = "200M"
    print("file size is {}MB, rar vol size is {}B".format(fi.size_mb, rar_vol_size_mb))
    # rar switches: a=add, -m0=store only, -ep=exclude base path, -ed=skip
    # empty dirs, -r=recurse, -y=assume yes, -V=volume size.
    cmd = "rar a -m0 -ep -ed -r -y -V{} {} '{}'".format(
        rar_vol_size_mb,
        fi.rar_file_path,
        fi.origin_path,
    )
    print("Running '{}'".format(cmd))
    subprocess.run(cmd, shell=True, check=True)
def generate_par_files(fi: FileInfo):
    """Create PAR2 recovery data (20% redundancy) for the rar volume set."""
    # Slice size scales with total size.  Contiguous boundaries: the
    # original's strict open intervals dropped sizes of exactly 0, 700,
    # 3000 or 5000 MB into the largest slice bucket.
    if fi.size_mb < 700:
        par_input_slice_mb = 10
    elif fi.size_mb < 3000:
        par_input_slice_mb = 15
    elif fi.size_mb < 5000:
        par_input_slice_mb = 25
    else:
        par_input_slice_mb = 100
    # set memory used to RAM-1GB or min of 2G
    memory_mb = max(2, (math.floor(RAM_GB) - 1)) * 1000
    cmd = 'parpar -n -O -d pow2 -r"20%" --input-slices={}M -m {}M -o {} {}'.format(
        par_input_slice_mb,
        memory_mb,
        fi.par_file_path,
        fi.rar_files_path_wildcard,
    )
    print("Running '{}'".format(cmd))
    subprocess.run(cmd, shell=True, check=True)
def upload_files(fi: FileInfo, servers):
    """Upload the working directory to one randomly chosen usenet server."""
    server = random.choice(servers)
    # Server credentials override/extend the base parameters.
    parameters = dict(serial="'${rand(38)}'", outf=fi.nzb_file_path, inf=fi.working_path)
    parameters.update(server)
    template = ("nyuu -S -h {host} -P {port} -n{connections} -u {user}"
                " -p {password} -s {serial} -o {outf} {inf}")
    cmd = template.format(**parameters)
    print("uploading to host='{}', # of connections='{}'".format(server["host"], server["connections"]))
    subprocess.run(cmd, shell=True, check=True, stdout=subprocess.DEVNULL)
def backup_files(fi: FileInfo, backup_path=RSYNC_DIR):
    """Copy the generated nzb file to the rclone backup destination.

    NOTE: the default ``backup_path`` is bound once at import time.
    """
    cmd = "rclone copy -P {} {}".format(
        fi.nzb_file_path,
        backup_path,
    )
    print("Running '{}'".format(cmd))
    subprocess.run(cmd, shell=True, check=True, stdout=subprocess.DEVNULL)
def setup(file_path, delete_source):
    """Load the JSON config at *file_path* into the module-level globals.

    :param file_path: absolute path to JSON config file
    :param delete_source: flag to indicate whether to delete source file
    :return: None. Side effects: populates the globals, measures RAM, and
        recreates the media-info working directory.
    """
    print("using configurations from file", file_path)
    with open(file_path) as fh:
        cfg = json.load(fh)
    global SERVERS, BASE_DIR, DELETE_SOURCE, MEDIA_INFO_DIR, PUBLISH_API_KEY, RAM_GB, WATCH_DIR, HEB_LIST
    DELETE_SOURCE = delete_source
    PUBLISH_API_KEY = cfg["publish_api_key"]
    SERVERS = cfg["servers"]
    BASE_DIR = cfg["base_dir"]
    MEDIA_INFO_DIR = cfg["media_info_dir"]
    WATCH_DIR = cfg["watch_dir"]
    print("using hosts =", ",".join(srv["host"] for srv in SERVERS))
    print("using BASE_DIR =", BASE_DIR)
    print("Will delete source files={}".format(DELETE_SOURCE))
    HEB_LIST = cfg["heb_list"]
    print("Using hebrew list: {}".format(",".join(HEB_LIST)))
    # Derive total physical RAM (GB) from page size * page count; parpar's
    # memory limit is sized from this later.
    page_size = os.sysconf('SC_PAGE_SIZE')
    page_count = os.sysconf('SC_PHYS_PAGES')
    RAM_GB = (page_size * page_count) / (1024. ** 3)
    print("found RAM={:.2f}GB".format(RAM_GB))
    # Start each run with a clean media-info directory.
    shutil.rmtree(MEDIA_INFO_DIR, ignore_errors=True)
    os.makedirs(MEDIA_INFO_DIR)
def check_commands_installed():
    """Verify every external tool the script shells out to is available.

    Exits the process with code -10 if any dependency is missing.
    """
    def probe(cmd):
        # A command counts as installed when invoking it returns exit code 0;
        # any failure (missing binary, non-zero exit) is reported as absent.
        try:
            completed = subprocess.run(cmd, shell=True, check=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
            return completed.returncode == 0
        except Exception as err:
            print(err)
            return False

    probes = [
        (cmd, probe(cmd)) for cmd in [
            "mediainfo -Help",
            "rar -?",
            "parpar --help",
            "nyuu --help",
            "rclone version",
        ]
    ]
    missing = [cmd for cmd, ok in probes if not ok]
    if missing:
        print("'{}' commands are NOT installed".format(" , ".join(missing)))
        sys.exit(-10)
    present = [cmd for cmd, ok in probes if ok]
    print("'{}' commands are installed".format(" , ".join(present)))
def cleanup(fi: FileInfo):
    """Delete the processed source when the global DELETE_SOURCE flag is set.

    Files are removed with os.remove, directories recursively with
    shutil.rmtree; with the flag off this is a no-op besides a log line.
    """
    if not DELETE_SOURCE:
        print("Delete source flag is turned off, so nothing to do here")
        return
    print("Deleting source file from '{}'".format(fi.origin_path))
    remover = os.remove if fi.is_file else shutil.rmtree
    remover(fi.origin_path)
def publish(fi: FileInfo):
    """Publish the NZB file to omgwtfnzbs.me via its upload API.

    The category id (tv or movie) is deduced from the release name, and
    Hebrew content is flagged by matching the name against the global
    HEB_LIST.
    """
    release = fi.normalized_file_name
    form_fields = {
        "rlsname": release,
        "nzb": "@" + fi.nzb_file_path,
        "upload": "upload",
    }
    # Anything with a season marker (S01, S02, ...) or "complete" is TV.
    tv_pattern = re.compile(".*S[0-9][0-9].*|.*complete.*", re.IGNORECASE)
    form_fields["catid"] = "tv" if tv_pattern.match(release) else "movie"
    if fi.is_media_file:
        form_fields["nfo"] = "@" + fi.nfo_file_path
    if any(word in release for word in HEB_LIST):
        # "12" is the site's language code used here for Hebrew content.
        form_fields["language"] = "12"
    fields_cmd = " ".join('-F "{}={}"'.format(key, val) for key, val in form_fields.items())
    url = "https://omgwtfnzbs.me/api-upload?user=mshare&api={}".format(PUBLISH_API_KEY)
    cmd = "curl -k -s -L -i -m60 {parameters_cmd} '{url}'".format(
        parameters_cmd=fields_cmd,
        url=url,
    )
    print("Running '{}'".format(cmd))
    subprocess.run(cmd, shell=True, check=True, stdout=subprocess.DEVNULL)
def process_file_info(source_path):
    """Run the full pipeline for one source path and return its FileInfo.

    Steps: build media info (media files only), rar, par2, upload, backup,
    publish, then cleanup.  All intermediate artifacts live in a temporary
    directory that is discarded when the context exits.
    """
    with tempfile.TemporaryDirectory() as scratch_dir:
        info = FileInfo(source_path, scratch_dir, MEDIA_INFO_DIR)
        print(info)
        if info.is_media_file:
            generate_media_file(info)
        generate_rar_files(info)
        generate_par_files(info)
        upload_files(info, SERVERS)
        backup_files(info)
        publish(info)
        cleanup(info)
        return info
def read_args():
    """Parse this script's command line arguments.

    :return: argparse.Namespace with ``config_file`` (path, defaults to
        "config.json") and ``delete_source`` (bool flag, defaults to False)
    """
    arg_parser = argparse.ArgumentParser(description="""
    Uploads media files to servers in an archive format.""")
    arg_parser.add_argument(
        '-c', '--config_file',
        default="config.json",
        help="a JSON format config file. Default is a config.json",
    )
    arg_parser.add_argument(
        '-d', '--delete_source',
        action='store_true',
        help="A boolean flag, when provided, will delete the source file",
    )
    return arg_parser.parse_args()
if __name__ == "__main__":
    print("START O V3 script")
    # Fail fast if any external tool is missing, then load configuration.
    check_commands_installed()
    args = read_args()
    setup(file_path=args.config_file, delete_source=args.delete_source)
    # Process every entry currently present in the watch directory.
    for entry in os.listdir(WATCH_DIR):
        source = os.path.join(WATCH_DIR, entry)
        started_at = datetime.datetime.now()
        fi = process_file_info(source)
        elapsed = datetime.datetime.now() - started_at
        print(
            "file_name='{}' | size='{}MB' | process time='{}'".format(
                fi.normalized_file_name,
                fi.size_mb,
                elapsed,
            )
        )
    print("FINISHED O V3 script")
| {
"content_hash": "e482471a664dfe3c0b614f171a2c66cb",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 115,
"avg_line_length": 32.89664804469274,
"alnum_prop": 0.5992188163369279,
"repo_name": "ofreshy/ojo",
"id": "86ded157e68b5c711c073f135a4624623acc563e",
"size": "11777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ojo/ooo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29579"
}
],
"symlink_target": ""
} |
from a10sdk.common.A10BaseClass import A10BaseClass
class PortRange(A10BaseClass):
    """CRUD wrapper for a CGNv6 lw-4o6 binding-table port range.

    This is the ``"PARENT"`` class for this module and maps to the AXAPI
    resource::

        /axapi/v3/cgnv6/lw-4o6/binding-table/{name}/tunnel-address/
        {ipv6_tunnel_addr}/nat-address/{ipv4_nat_addr}/port-range/
        {port_start}+{port_end}

    :param port_start: Single Port or Port Range Start (number, required)
    :param port_end: Port Range End (number, required)
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        # Both endpoints of the range are mandatory for this resource.
        self.required = ["port_start", "port_end"]
        self.b_key = "port-range"
        self.a10_url = "/axapi/v3/cgnv6/lw-4o6/binding-table/{name}/tunnel-address/{ipv6_tunnel_addr}/nat-address/{ipv4_nat_addr}/port-range/{port_start}+{port_end}"
        self.DeviceProxy = ""
        self.port_start = ""
        self.port_end = ""
        # Any keyword argument becomes an attribute (e.g. port_start=80).
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
| {
"content_hash": "a38be2296b823087d713479e35dd5e38",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 177,
"avg_line_length": 36.13157894736842,
"alnum_prop": 0.6453022578295703,
"repo_name": "amwelch/a10sdk-python",
"id": "9f676bc531a1cc94b85ef675d3be2a01ac9d4568",
"size": "1373",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/cgnv6/cgnv6_lw_4o6_binding_table_tunnel_address_nat_address_port_range.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import re
import os
from xml.etree import ElementTree as ET
import openmc
import openmc.checkvalue as cv
from openmc.data import NATURAL_ABUNDANCE, atomic_mass
class Element(str):
    """A natural element that auto-expands to add the isotopes of an element to
    a material in their natural abundance. Internally, the OpenMC Python API
    expands the natural element into isotopes only when the materials.xml file
    is created.

    Parameters
    ----------
    name : str
        Chemical symbol of the element, e.g. Pu

    Attributes
    ----------
    name : str
        Chemical symbol of the element, e.g. Pu

    """

    def __new__(cls, name):
        # Validate the symbol (a 1- or 2-character string) before building
        # the underlying str instance.
        cv.check_type('element name', name, str)
        cv.check_length('element name', name, 1, 2)
        return super().__new__(cls, name)

    @property
    def name(self):
        return self

    def expand(self, percent, percent_type, enrichment=None,
               cross_sections=None):
        """Expand natural element into its naturally-occurring isotopes.

        An optional cross_sections argument or the OPENMC_CROSS_SECTIONS
        environment variable is used to specify a cross_sections.xml file.
        If the cross_sections.xml file is found, the element is expanded only
        into the isotopes/nuclides present in cross_sections.xml. If no
        cross_sections.xml file is found, the element is expanded based on its
        naturally occurring isotopes.

        Parameters
        ----------
        percent : float
            Atom or weight percent
        percent_type : {'ao', 'wo'}
            'ao' for atom percent and 'wo' for weight percent
        enrichment : float, optional
            Enrichment for U235 in weight percent. For example, input 4.95 for
            4.95 weight percent enriched U. Default is None
            (natural composition).
        cross_sections : str, optional
            Location of cross_sections.xml file. Default is None.

        Returns
        -------
        isotopes : list
            Naturally-occurring isotopes of the element. Each item of the list
            is a tuple consisting of a nuclide string, the atom/weight percent,
            and the string 'ao' or 'wo'.

        Notes
        -----
        When the `enrichment` argument is specified, a correlation from
        `ORNL/CSD/TM-244 <https://doi.org/10.2172/5561567>`_ is used to
        calculate the weight fractions of U234, U235, U236, and U238. Namely,
        the weight fraction of U234 and U236 are taken to be 0.89% and 0.46%,
        respectively, of the U235 weight fraction. The remainder of the
        isotopic weight is assigned to U238.

        """
        # Get the nuclides present in nature
        natural_nuclides = set()
        for nuclide in sorted(NATURAL_ABUNDANCE.keys()):
            if re.match(r'{}\d+'.format(self), nuclide):
                natural_nuclides.add(nuclide)

        # Create dict to store the expanded nuclides and abundances
        abundances = OrderedDict()

        # If cross_sections is None, get the cross sections from the
        # OPENMC_CROSS_SECTIONS environment variable
        if cross_sections is None:
            cross_sections = os.environ.get('OPENMC_CROSS_SECTIONS')

        # If a cross_sections library is present, check natural nuclides
        # against the nuclides in the library
        if cross_sections is not None:
            library_nuclides = set()
            tree = ET.parse(cross_sections)
            root = tree.getroot()
            for child in root:
                nuclide = child.attrib['materials']
                if re.match(r'{}\d+'.format(self), nuclide) and \
                   '_m' not in nuclide:
                    library_nuclides.add(nuclide)

            # Get a set of the mutual and absent nuclides. Convert to lists
            # and sort to avoid different ordering between Python 2 and 3.
            mutual_nuclides = natural_nuclides.intersection(library_nuclides)
            absent_nuclides = natural_nuclides.difference(mutual_nuclides)
            mutual_nuclides = sorted(list(mutual_nuclides))
            absent_nuclides = sorted(list(absent_nuclides))

            # If all natural nuclides are present in the library, expand
            # element using all natural nuclides
            if len(absent_nuclides) == 0:
                for nuclide in mutual_nuclides:
                    abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]

            # If no natural elements are present in the library, check if the
            # 0 nuclide is present. If so, set the abundance to 1 for this
            # nuclide. Else, raise an error.
            elif len(mutual_nuclides) == 0:
                nuclide_0 = self + '0'
                if nuclide_0 in library_nuclides:
                    abundances[nuclide_0] = 1.0
                else:
                    msg = 'Unable to expand element {0} because the cross '\
                          'section library provided does not contain any of '\
                          'the natural isotopes for that element.'\
                          .format(self)
                    raise ValueError(msg)

            # If some, but not all, natural nuclides are in the library, add
            # the mutual nuclides. For the absent nuclides, add them based on
            # our knowledge of the common cross section libraries
            # (ENDF, JEFF, and JENDL)
            else:
                # Add the mutual isotopes
                for nuclide in mutual_nuclides:
                    abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]

                # Adjust the abundances for the absent nuclides
                for nuclide in absent_nuclides:
                    if nuclide in ['O17', 'O18'] and 'O16' in mutual_nuclides:
                        abundances['O16'] += NATURAL_ABUNDANCE[nuclide]
                    elif nuclide == 'Ta180' and 'Ta181' in mutual_nuclides:
                        abundances['Ta181'] += NATURAL_ABUNDANCE[nuclide]
                    elif nuclide == 'W180' and 'W182' in mutual_nuclides:
                        abundances['W182'] += NATURAL_ABUNDANCE[nuclide]
                    else:
                        msg = 'Unsure how to partition natural abundance of ' \
                              'isotope {0} into other natural isotopes of ' \
                              'this element that are present in the cross ' \
                              'section library provided. Consider adding ' \
                              'the isotopes of this element individually.'
                        # Bug fix: the {0} placeholder was never substituted —
                        # the original raised the message with a literal '{0}'.
                        raise ValueError(msg.format(nuclide))

        # If a cross_section library is not present, expand the element into
        # its natural nuclides
        else:
            for nuclide in natural_nuclides:
                abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]

        # Modify mole fractions if enrichment provided
        if enrichment is not None:
            # Calculate the mass fractions of isotopes
            abundances['U234'] = 0.0089 * enrichment
            abundances['U235'] = enrichment
            abundances['U236'] = 0.0046 * enrichment
            abundances['U238'] = 100.0 - 1.0135 * enrichment

            # Convert the mass fractions to mole fractions
            for nuclide in abundances.keys():
                abundances[nuclide] /= atomic_mass(nuclide)

            # Normalize the mole fractions to one
            sum_abundances = sum(abundances.values())
            for nuclide in abundances.keys():
                abundances[nuclide] /= sum_abundances

        # Compute the ratio of the nuclide atomic masses to the element
        # atomic mass
        if percent_type == 'wo':

            # Compute the element atomic mass
            element_am = 0.
            for nuclide in abundances.keys():
                element_am += atomic_mass(nuclide) * abundances[nuclide]

            # Convert the molar fractions to mass fractions
            for nuclide in abundances.keys():
                abundances[nuclide] *= atomic_mass(nuclide) / element_am

            # Normalize the mass fractions to one
            sum_abundances = sum(abundances.values())
            for nuclide in abundances.keys():
                abundances[nuclide] /= sum_abundances

        # Create a list of the isotopes in this element
        isotopes = []
        for nuclide, abundance in abundances.items():
            isotopes.append((nuclide, percent * abundance, percent_type))

        return isotopes
| {
"content_hash": "7bbd41ca3faee08a3ec5b356ad3e7835",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 80,
"avg_line_length": 41.20673076923077,
"alnum_prop": 0.5856959514642399,
"repo_name": "johnnyliu27/openmc",
"id": "336d1f028bc74433e8686e77f8a5dfc852d7ad68",
"size": "8571",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "openmc/element.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7619"
},
{
"name": "C++",
"bytes": "825710"
},
{
"name": "CMake",
"bytes": "33163"
},
{
"name": "Dockerfile",
"bytes": "1427"
},
{
"name": "Fortran",
"bytes": "1089808"
},
{
"name": "Python",
"bytes": "2433489"
},
{
"name": "Shell",
"bytes": "2986"
}
],
"symlink_target": ""
} |
from suit.apps import DjangoSuitConfig
from suit.menu import ChildItem, ParentItem
class SuitConfig(DjangoSuitConfig):
    """Django Suit admin configuration: vertical layout with a custom menu."""
    # Render the admin navigation as a vertical sidebar.
    layout = 'vertical'
    # Admin menu tree: a "Users" group exposing the user and auth-group
    # models, followed by the countries app with a map-marker icon.
    menu = (
        ParentItem('Users', children=[
            ChildItem(model='users.user'),
            ChildItem(model='auth.group'),
        ], icon='fa fa-users'),
        ParentItem(app='countries', icon='fa fa-map-marker'),
    )
| {
"content_hash": "b7615fcc577a13e5cd90b460c1fe1feb",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 61,
"avg_line_length": 27.428571428571427,
"alnum_prop": 0.6276041666666666,
"repo_name": "mongkok/defocus",
"id": "601d4056272ab2f788b5a0c135e8419104c170a0",
"size": "384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5215"
},
{
"name": "Makefile",
"bytes": "3210"
},
{
"name": "Python",
"bytes": "39366"
},
{
"name": "Shell",
"bytes": "1712"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.