| hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction |
| string (len 40) | int64 (4-1.02M) | string (8 classes) | string (1 class) | string (len 4-209) | string (len 5-121) | string (len 40) | list (len 1-10) | int64 (1-191k) ⌀ | string (len 24) ⌀ | string (len 24) ⌀ | string (len 4-209) | string (len 5-121) | string (len 40) | list (len 1-10) | int64 (1-67k) ⌀ | string (len 24) ⌀ | string (len 24) ⌀ | string (len 4-209) | string (len 5-121) | string (len 40) | list (len 1-10) | int64 (1-105k) ⌀ | string (len 24) ⌀ | string (len 24) ⌀ | string (len 4-1.02M) | float64 (1.07-66.1k) | int64 (4-266k) | float64 (0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| a761022844f7e41562d983142585602cd2c64882 | 207 | py | Python | process_2.py | renansantosmendes/benchmark_tests | 106f842b304a7fc9fa348ea0b6d50f448e46538b | ["Apache-2.0"] | null | null | null | process_2.py | renansantosmendes/benchmark_tests | 106f842b304a7fc9fa348ea0b6d50f448e46538b | ["Apache-2.0"] | null | null | null | process_2.py | renansantosmendes/benchmark_tests | 106f842b304a7fc9fa348ea0b6d50f448e46538b | ["Apache-2.0"] | null | null | null |
from datetime import datetime
print('Process 2')
print('start:', datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
for _ in range(100000000):
pass
print('end:', datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
| 29.571429 | 61 | 0.63285 |
| 6e27cf1290f844f27aaf54398c3595aa12f64f0d | 9,082 | py | Python | trimesh/scene/lighting.py | LinJiarui/trimesh | 5f925bbab447e733d6f1ebf0956b202d18271ee1 | ["MIT"] | 1 | 2020-05-22T13:56:05.000Z | 2020-05-22T13:56:05.000Z | trimesh/scene/lighting.py | LinJiarui/trimesh | 5f925bbab447e733d6f1ebf0956b202d18271ee1 | ["MIT"] | null | null | null | trimesh/scene/lighting.py | LinJiarui/trimesh | 5f925bbab447e733d6f1ebf0956b202d18271ee1 | ["MIT"] | null | null | null |
"""
lighting.py
--------------
Hold basic information about lights.
Forked from the light model in `pyrender`:
https://github.com/mmatl/pyrender
"""
import abc
import sys
import numpy as np
from .. import util
from .. import visual
from .. import transformations
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta('ABC', (), {})
# default light color
_DEFAULT_RGBA = np.array([60, 60, 60, 255], dtype=np.uint8)
class Light(ABC):
"""
Base class for all light objects.
Attributes
----------
name : str, optional
Name of the light.
color : (4,) uint8
RGBA value for the light's color in linear space.
intensity : float
Brightness of light. The units that this is defined in depend
on the type of light: point and spot lights use luminous intensity
in candela (lm/sr) while directional lights use illuminance
in lux (lm/m2).
radius : float
Cutoff distance at which light's intensity may be considered to
        have reached zero. Supported only for point and spot lights,
        must be > 0.0.
If None, the radius is assumed to be infinite.
"""
def __init__(self,
name=None,
color=None,
intensity=None,
radius=None):
if name is None:
# if name is not passed, make it something unique
self.name = 'light_{}'.format(util.unique_id(6).upper())
else:
# otherwise assign it
self.name = name
self.color = color
self.intensity = intensity
self.radius = radius
@property
def color(self):
return self._color
@color.setter
def color(self, value):
if value is None:
self._color = _DEFAULT_RGBA
else:
value = visual.to_rgba(value)
if len(value.shape) == 2:
value = value[0]
if value.shape != (4,):
raise ValueError("couldn't convert color to RGBA!")
# uint8 RGB color
self._color = value
@property
def intensity(self):
return self._intensity
@intensity.setter
def intensity(self, value):
if value is not None:
self._intensity = float(value)
else:
self._intensity = 1.0
@property
def radius(self):
return self._radius
    @radius.setter
    def radius(self, value):
        if value is None:
            # None is interpreted as an infinite radius
            self._radius = None
        else:
            self._radius = float(value)
class DirectionalLight(Light):
"""
Directional lights are light sources that act as though they are
infinitely far away and emit light in the direction of the local -z axis.
This light type inherits the orientation of the node that it belongs to;
position and scale are ignored except for their effect on the inherited
node orientation. Because it is at an infinite distance, the light is
not attenuated. Its intensity is defined in lumens per metre squared,
or lux (lm/m2).
Attributes
----------
name : str, optional
Name of the light.
    color : (4,) uint8
RGBA value for the light's color in linear space.
intensity : float
Brightness of light. The units that this is defined in depend on the type of light.
point and spot lights use luminous intensity in candela (lm/sr),
while directional lights use illuminance in lux (lm/m2).
radius : float
Cutoff distance at which light's intensity may be considered to
have reached zero. Supported only for point and spot lights, must be > 0.
If None, the radius is assumed to be infinite.
"""
def __init__(self,
name=None,
color=None,
intensity=None,
radius=None):
super(DirectionalLight, self).__init__(
name=name,
color=color,
intensity=intensity,
radius=radius
)
class PointLight(Light):
"""
Point lights emit light in all directions from their position in space;
rotation and scale are ignored except for their effect on the inherited
node position. The brightness of the light attenuates in a physically
correct manner as distance increases from the light's position (i.e.
brightness goes like the inverse square of the distance). Point light
intensity is defined in candela, which is lumens per square radian (lm/sr).
Attributes
----------
name : str, optional
Name of the light.
color : (4,) uint8
RGBA value for the light's color in linear space.
intensity : float
Brightness of light. The units that this is defined in depend on the type of light.
point and spot lights use luminous intensity in candela (lm/sr),
while directional lights use illuminance in lux (lm/m2).
radius : float
Cutoff distance at which light's intensity may be considered to
have reached zero. Supported only for point and spot lights, must be > 0.
If None, the radius is assumed to be infinite.
"""
def __init__(self,
name=None,
color=None,
intensity=None,
radius=None):
super(PointLight, self).__init__(
name=name,
color=color,
intensity=intensity,
radius=radius
)
class SpotLight(Light):
"""
Spot lights emit light in a cone in the direction of the local -z axis.
The angle and falloff of the cone is defined using two numbers, the
`innerConeAngle` and `outerConeAngle`. As with point lights, the brightness
also attenuates in a physically correct manner as distance increases from
the light's position (i.e. brightness goes like the inverse square of the
distance). Spot light intensity refers to the brightness inside the
`innerConeAngle` (and at the location of the light) and is defined in
candela, which is lumens per square radian (lm/sr). A spot light's position
and orientation are inherited from its node transform. Inherited scale does
not affect cone shape, and is ignored except for its effect on position
and orientation.
Attributes
----------
name : str, optional
Name of the light.
color : (4,) uint8
RGBA value for the light's color in linear space.
intensity : float
Brightness of light. The units that this is defined in depend on the type of light.
point and spot lights use luminous intensity in candela (lm/sr),
while directional lights use illuminance in lux (lm/m2).
radius : float
Cutoff distance at which light's intensity may be considered to
have reached zero. Supported only for point and spot lights, must be > 0.
If None, the radius is assumed to be infinite.
innerConeAngle : float
Angle, in radians, from centre of spotlight where falloff begins.
Must be greater than or equal to `0` and less than `outerConeAngle`.
outerConeAngle : float
Angle, in radians, from centre of spotlight where falloff ends.
Must be greater than `innerConeAngle` and less than or equal to `PI / 2.0`.
"""
def __init__(self,
name=None,
color=None,
intensity=None,
radius=None,
innerConeAngle=0.0,
outerConeAngle=np.pi / 4.0):
super(SpotLight, self).__init__(
name=name,
color=color,
intensity=intensity,
radius=radius
)
self.outerConeAngle = outerConeAngle
self.innerConeAngle = innerConeAngle
@property
def innerConeAngle(self):
return self._innerConeAngle
@innerConeAngle.setter
def innerConeAngle(self, value):
if value < 0.0 or value > self.outerConeAngle:
raise ValueError('Invalid value for inner cone angle')
self._innerConeAngle = float(value)
@property
def outerConeAngle(self):
return self._outerConeAngle
@outerConeAngle.setter
def outerConeAngle(self, value):
if value < 0.0 or value > np.pi / 2.0 + 1e-9:
raise ValueError('Invalid value for outer cone angle')
self._outerConeAngle = float(value)
def autolight(scene):
"""
Generate a list of lights for a scene that looks decent.
Parameters
--------------
scene : trimesh.Scene
Scene with geometry
Returns
--------------
lights : [Light]
List of light objects
transforms : (len(lights), 4, 4) float
Transformation matrices for light positions.
"""
# create two default point lights
lights = [PointLight(), PointLight()]
# create two translation matrices for bounds corners
transforms = [transformations.translation_matrix(b)
for b in scene.bounds]
return lights, transforms
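# ------------------------------------------------------------------
# Minimal usage sketch, assuming `trimesh` is installed and this module
# is importable as `trimesh.scene.lighting`:
#
#   import trimesh
#   from trimesh.scene import lighting
#
#   scene = trimesh.Scene(trimesh.creation.box())
#   lights, transforms = lighting.autolight(scene)
#   spot = lighting.SpotLight(intensity=3.0, innerConeAngle=0.5)
# ------------------------------------------------------------------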
| 32.205674 | 91 | 0.62233 |
| f7c83712130bfd4becefa3c99a09f0c9e7be5c28 | 20,999 | py | Python | litex/soc/cores/cpu/vexriscv_smp/core.py | JosephBushagour/litex | 2b49430f2c53c4a8caa66b678af4660127b546e4 | ["ADSL"] | null | null | null | litex/soc/cores/cpu/vexriscv_smp/core.py | JosephBushagour/litex | 2b49430f2c53c4a8caa66b678af4660127b546e4 | ["ADSL"] | null | null | null | litex/soc/cores/cpu/vexriscv_smp/core.py | JosephBushagour/litex | 2b49430f2c53c4a8caa66b678af4660127b546e4 | ["ADSL"] | null | null | null |
#
# This file is part of LiteX.
#
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2020 Dolu1990 <charles.papon.90@gmail.com>
# SPDX-License-Identifier: BSD-2-Clause
import os
from os import path
from migen import *
from litex import get_data_mod
from litex.soc.interconnect import wishbone
from litex.soc.interconnect.csr import *
from litex.soc.cores.cpu import CPU, CPU_GCC_TRIPLE_RISCV32
class Open(Signal): pass
# Variants -----------------------------------------------------------------------------------------
CPU_VARIANTS = {
"standard": "VexRiscv",
"linux": "VexRiscv", # Similar to standard.
}
# VexRiscv SMP -------------------------------------------------------------------------------------
class VexRiscvSMP(CPU):
name = "vexriscv"
human_name = "VexRiscv SMP"
variants = CPU_VARIANTS
data_width = 32
endianness = "little"
gcc_triple = CPU_GCC_TRIPLE_RISCV32
linker_output_format = "elf32-littleriscv"
nop = "nop"
io_regions = {0x80000000: 0x80000000} # Origin, Length.
# Default parameters.
cpu_count = 1
dcache_size = 4096
icache_size = 4096
dcache_ways = 1
icache_ways = 1
coherent_dma = False
litedram_width = 32
dcache_width = 32
icache_width = 32
aes_instruction = False
out_of_order_decoder = True
wishbone_memory = False
with_fpu = False
cpu_per_fpu = 4
with_rvc = False
dtlb_size = 4
itlb_size = 4
# Command line configuration arguments.
@staticmethod
def args_fill(parser):
parser.add_argument("--cpu-count", default=1, help="Number of CPU(s) in the cluster.", type=int)
parser.add_argument("--with-coherent-dma", action="store_true", help="Enable Coherent DMA Slave interface.")
parser.add_argument("--without-coherent-dma", action="store_true", help="Disable Coherent DMA Slave interface.")
parser.add_argument("--dcache-width", default=None, help="L1 data cache bus width.")
parser.add_argument("--icache-width", default=None, help="L1 instruction cache bus width.")
parser.add_argument("--dcache-size", default=None, help="L1 data cache size in byte per CPU.")
parser.add_argument("--dcache-ways", default=None, help="L1 data cache ways per CPU.")
parser.add_argument("--icache-size", default=None, help="L1 instruction cache size in byte per CPU.")
parser.add_argument("--icache-ways", default=None, help="L1 instruction cache ways per CPU")
parser.add_argument("--aes-instruction", default=None, help="Enable AES instruction acceleration.")
parser.add_argument("--without-out-of-order-decoder", action="store_true", help="Reduce area at cost of peripheral access speed")
parser.add_argument("--with-wishbone-memory" , action="store_true", help="Disable native LiteDRAM interface")
parser.add_argument("--with-fpu" , action="store_true", help="Enable the F32/F64 FPU")
parser.add_argument("--cpu-per-fpu" , default="4", help="Maximal ratio between CPU count and FPU count. Will instanciate as many FPU as necessary.")
parser.add_argument("--with-rvc" , action="store_true", help="Enable RISC-V compressed instruction support")
parser.add_argument("--dtlb-size", default=4, help="Data TLB size.")
parser.add_argument("--itlb-size", default=4, help="Instruction TLB size.")
@staticmethod
def args_read(args):
VexRiscvSMP.cpu_count = args.cpu_count
if int(args.cpu_count) != 1:
VexRiscvSMP.icache_width = 64
VexRiscvSMP.dcache_width = 64
VexRiscvSMP.dcache_size = 8192
VexRiscvSMP.icache_size = 8192
VexRiscvSMP.dcache_ways = 2
VexRiscvSMP.icache_ways = 2
VexRiscvSMP.coherent_dma = True
        if(args.with_coherent_dma):    VexRiscvSMP.coherent_dma = True
        if(args.without_coherent_dma): VexRiscvSMP.coherent_dma = False
if(args.dcache_width): VexRiscvSMP.dcache_width = int(args.dcache_width)
if(args.icache_width): VexRiscvSMP.icache_width = int(args.icache_width)
if(args.dcache_size): VexRiscvSMP.dcache_size = int(args.dcache_size)
if(args.icache_size): VexRiscvSMP.icache_size = int(args.icache_size)
if(args.dcache_ways): VexRiscvSMP.dcache_ways = int(args.dcache_ways)
if(args.icache_ways): VexRiscvSMP.icache_ways = int(args.icache_ways)
if(args.aes_instruction): VexRiscvSMP.aes_instruction = bool(args.aes_instruction)
if(args.without_out_of_order_decoder): VexRiscvSMP.out_of_order_decoder = False
if(args.with_wishbone_memory): VexRiscvSMP.wishbone_memory = True
if(args.with_fpu):
VexRiscvSMP.with_fpu = True
VexRiscvSMP.icache_width = 64
VexRiscvSMP.dcache_width = 64 # Required for F64
if(args.cpu_per_fpu):
VexRiscvSMP.cpu_per_fpu = args.cpu_per_fpu
if(args.with_rvc):
VexRiscvSMP.with_rvc = True
if(args.dtlb_size): VexRiscvSMP.dtlb_size = int(args.dtlb_size)
if(args.itlb_size): VexRiscvSMP.itlb_size = int(args.itlb_size)
# ABI.
@staticmethod
def get_abi():
abi = "ilp32"
if VexRiscvSMP.with_fpu:
abi +="d"
return abi
# Arch.
@staticmethod
def get_arch():
arch = "rv32ima"
if VexRiscvSMP.with_fpu:
arch += "fd"
if VexRiscvSMP.with_rvc:
arch += "c"
return arch
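    # For example, with the class defaults above (no FPU, no RVC) the
    # (ABI, arch) pair is ("ilp32", "rv32ima"); with `with_fpu = True`
    # and `with_rvc = True` it becomes ("ilp32d", "rv32imafdc").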
# Memory Mapping.
@property
def mem_map(self):
return {
"rom": 0x00000000,
"sram": 0x10000000,
"main_ram": 0x40000000,
"csr": 0xf0000000,
"clint": 0xf0010000,
"plic": 0xf0c00000,
}
# GCC Flags.
@property
def gcc_flags(self):
flags = f" -march={VexRiscvSMP.get_arch()} -mabi={VexRiscvSMP.get_abi()}"
flags += f" -D__vexriscv__"
flags += f" -DUART_POLLING"
return flags
# Cluster Name Generation.
@staticmethod
def generate_cluster_name():
ldw = f"Ldw{VexRiscvSMP.litedram_width}"
VexRiscvSMP.cluster_name = f"VexRiscvLitexSmpCluster_" \
f"Cc{VexRiscvSMP.cpu_count}" \
"_" \
f"Iw{VexRiscvSMP.icache_width}" \
f"Is{VexRiscvSMP.icache_size}" \
f"Iy{VexRiscvSMP.icache_ways}" \
"_" \
f"Dw{VexRiscvSMP.dcache_width}" \
f"Ds{VexRiscvSMP.dcache_size}" \
f"Dy{VexRiscvSMP.dcache_ways}" \
"_" \
f"ITs{VexRiscvSMP.itlb_size}" \
f"DTs{VexRiscvSMP.dtlb_size}" \
f"{'_'+ldw if not VexRiscvSMP.wishbone_memory else ''}" \
f"{'_Cdma' if VexRiscvSMP.coherent_dma else ''}" \
f"{'_Aes' if VexRiscvSMP.aes_instruction else ''}" \
f"{'_Ood' if VexRiscvSMP.out_of_order_decoder else ''}" \
f"{'_Wm' if VexRiscvSMP.wishbone_memory else ''}" \
f"{'_Fpu' + str(VexRiscvSMP.cpu_per_fpu) if VexRiscvSMP.with_fpu else ''}" \
f"{'_Rvc' if VexRiscvSMP.with_rvc else ''}"
# Default Configs Generation.
@staticmethod
def generate_default_configs():
# Single cores.
for data_width in [16, 32, 64, 128]:
VexRiscvSMP.litedram_width = data_width
VexRiscvSMP.icache_width = 32
VexRiscvSMP.dcache_width = 32
VexRiscvSMP.coherent_dma = False
VexRiscvSMP.cpu_count = 1
# Low cache amount.
VexRiscvSMP.dcache_size = 4096
VexRiscvSMP.icache_size = 4096
VexRiscvSMP.dcache_ways = 1
VexRiscvSMP.icache_ways = 1
# Without DMA.
VexRiscvSMP.coherent_dma = False
VexRiscvSMP.generate_cluster_name()
VexRiscvSMP.generate_netlist()
# With DMA.
VexRiscvSMP.coherent_dma = True
VexRiscvSMP.generate_cluster_name()
VexRiscvSMP.generate_netlist()
# High cache amount.
VexRiscvSMP.dcache_size = 8192
VexRiscvSMP.icache_size = 8192
VexRiscvSMP.dcache_ways = 2
VexRiscvSMP.icache_ways = 2
VexRiscvSMP.icache_width = 32 if data_width < 64 else 64
VexRiscvSMP.dcache_width = 32 if data_width < 64 else 64
# Without DMA.
VexRiscvSMP.coherent_dma = False
VexRiscvSMP.generate_cluster_name()
VexRiscvSMP.generate_netlist()
# With DMA.
VexRiscvSMP.coherent_dma = True
VexRiscvSMP.generate_cluster_name()
VexRiscvSMP.generate_netlist()
# Multi cores.
for core_count in [2,4]:
VexRiscvSMP.litedram_width = 128
VexRiscvSMP.icache_width = 64
VexRiscvSMP.dcache_width = 64
VexRiscvSMP.dcache_size = 8192
VexRiscvSMP.icache_size = 8192
VexRiscvSMP.dcache_ways = 2
VexRiscvSMP.icache_ways = 2
VexRiscvSMP.coherent_dma = True
VexRiscvSMP.cpu_count = core_count
VexRiscvSMP.generate_cluster_name()
VexRiscvSMP.generate_netlist()
# Netlist Generation.
@staticmethod
def generate_netlist():
print(f"Generating cluster netlist")
vdir = get_data_mod("cpu", "vexriscv_smp").data_location
gen_args = []
if(VexRiscvSMP.coherent_dma):
gen_args.append("--coherent-dma")
gen_args.append(f"--cpu-count={VexRiscvSMP.cpu_count}")
gen_args.append(f"--ibus-width={VexRiscvSMP.icache_width}")
gen_args.append(f"--dbus-width={VexRiscvSMP.dcache_width}")
gen_args.append(f"--dcache-size={VexRiscvSMP.dcache_size}")
gen_args.append(f"--icache-size={VexRiscvSMP.icache_size}")
gen_args.append(f"--dcache-ways={VexRiscvSMP.dcache_ways}")
gen_args.append(f"--icache-ways={VexRiscvSMP.icache_ways}")
gen_args.append(f"--litedram-width={VexRiscvSMP.litedram_width}")
gen_args.append(f"--aes-instruction={VexRiscvSMP.aes_instruction}")
gen_args.append(f"--out-of-order-decoder={VexRiscvSMP.out_of_order_decoder}")
gen_args.append(f"--wishbone-memory={VexRiscvSMP.wishbone_memory}")
gen_args.append(f"--fpu={VexRiscvSMP.with_fpu}")
gen_args.append(f"--cpu-per-fpu={VexRiscvSMP.cpu_per_fpu}")
gen_args.append(f"--rvc={VexRiscvSMP.with_rvc}")
gen_args.append(f"--netlist-name={VexRiscvSMP.cluster_name}")
gen_args.append(f"--netlist-directory={vdir}")
gen_args.append(f"--dtlb-size={VexRiscvSMP.dtlb_size}")
gen_args.append(f"--itlb-size={VexRiscvSMP.itlb_size}")
cmd = 'cd {path} && sbt "runMain vexriscv.demo.smp.VexRiscvLitexSmpClusterCmdGen {args}"'.format(path=os.path.join(vdir, "ext", "VexRiscv"), args=" ".join(gen_args))
if os.system(cmd) != 0:
raise OSError('Failed to run sbt')
def __init__(self, platform, variant):
self.platform = platform
self.variant = "standard"
self.human_name = self.human_name + "-" + variant.upper()
self.reset = Signal()
self.jtag_clk = Signal()
self.jtag_enable = Signal()
self.jtag_capture = Signal()
self.jtag_shift = Signal()
self.jtag_update = Signal()
self.jtag_reset = Signal()
self.jtag_tdo = Signal()
self.jtag_tdi = Signal()
self.interrupt = Signal(32)
self.pbus = pbus = wishbone.Interface()
self.periph_buses = [pbus] # Peripheral buses (Connected to main SoC's bus).
self.memory_buses = [] # Memory buses (Connected directly to LiteDRAM).
# # #
self.cpu_params = dict(
# Clk / Rst.
i_debugCd_external_clk = ClockSignal(),
i_debugCd_external_reset = ResetSignal() | self.reset,
# Interrupts.
i_interrupts = self.interrupt,
# JTAG.
i_jtag_clk = self.jtag_clk,
i_debugPort_enable = self.jtag_enable,
i_debugPort_capture = self.jtag_capture,
i_debugPort_shift = self.jtag_shift,
i_debugPort_update = self.jtag_update,
i_debugPort_reset = self.jtag_reset,
i_debugPort_tdi = self.jtag_tdi,
o_debugPort_tdo = self.jtag_tdo,
# Peripheral Bus (Master).
o_peripheral_CYC = pbus.cyc,
o_peripheral_STB = pbus.stb,
i_peripheral_ACK = pbus.ack,
o_peripheral_WE = pbus.we,
o_peripheral_ADR = pbus.adr,
i_peripheral_DAT_MISO = pbus.dat_r,
o_peripheral_DAT_MOSI = pbus.dat_w,
o_peripheral_SEL = pbus.sel,
i_peripheral_ERR = pbus.err,
o_peripheral_CTI = pbus.cti,
o_peripheral_BTE = pbus.bte
)
if VexRiscvSMP.coherent_dma:
self.dma_bus = dma_bus = wishbone.Interface(data_width=VexRiscvSMP.dcache_width)
dma_bus_stall = Signal()
dma_bus_inhibit = Signal()
self.cpu_params.update(
# DMA Bus (Slave).
i_dma_wishbone_CYC = dma_bus.cyc,
i_dma_wishbone_STB = dma_bus.stb & ~dma_bus_inhibit,
o_dma_wishbone_ACK = dma_bus.ack,
i_dma_wishbone_WE = dma_bus.we,
i_dma_wishbone_SEL = dma_bus.sel,
i_dma_wishbone_ADR = dma_bus.adr,
o_dma_wishbone_DAT_MISO = dma_bus.dat_r,
i_dma_wishbone_DAT_MOSI = dma_bus.dat_w,
o_dma_wishbone_STALL = dma_bus_stall
)
self.sync += [
If(dma_bus.stb & dma_bus.cyc & ~dma_bus_stall,
dma_bus_inhibit.eq(1),
),
If(dma_bus.ack,
dma_bus_inhibit.eq(0)
)
]
def set_reset_address(self, reset_address):
assert not hasattr(self, "reset_address")
self.reset_address = reset_address
assert reset_address == 0x00000000
def add_sources(self, platform):
vdir = get_data_mod("cpu", "vexriscv_smp").data_location
print(f"VexRiscv cluster : {self.cluster_name}")
if not path.exists(os.path.join(vdir, self.cluster_name + ".v")):
self.generate_netlist()
# Add RAM.
# By default, use Generic RAM implementation.
ram_filename = "Ram_1w_1rs_Generic.v"
# On Altera/Intel platforms, use specific implementation.
from litex.build.altera import AlteraPlatform
if isinstance(platform, AlteraPlatform):
ram_filename = "Ram_1w_1rs_Intel.v"
platform.add_source(os.path.join(vdir, ram_filename), "verilog")
# Add Cluster.
platform.add_source(os.path.join(vdir, self.cluster_name + ".v"), "verilog")
def add_soc_components(self, soc, soc_region_cls):
# Define number of CPUs
soc.add_config("CPU_COUNT", VexRiscvSMP.cpu_count)
soc.add_constant("CPU_ISA", VexRiscvSMP.get_arch())
# Constants for cache so we can add them in the DTS.
if (VexRiscvSMP.dcache_size > 0):
soc.add_constant("cpu_dcache_size", VexRiscvSMP.dcache_size)
soc.add_constant("cpu_dcache_ways", VexRiscvSMP.dcache_ways)
soc.add_constant("cpu_dcache_block_size", 64) # hardwired?
if (VexRiscvSMP.icache_size > 0):
soc.add_constant("cpu_icache_size", VexRiscvSMP.icache_size)
soc.add_constant("cpu_icache_ways", VexRiscvSMP.icache_ways)
soc.add_constant("cpu_icache_block_size", 64) # hardwired?
# Constants for TLB so we can add them in the DTS
# full associative so only the size is described.
if (VexRiscvSMP.dtlb_size > 0):
soc.add_constant("cpu_dtlb_size", VexRiscvSMP.dtlb_size)
soc.add_constant("cpu_dtlb_ways", VexRiscvSMP.dtlb_size)
if (VexRiscvSMP.itlb_size > 0):
soc.add_constant("cpu_itlb_size", VexRiscvSMP.itlb_size)
soc.add_constant("cpu_itlb_ways", VexRiscvSMP.itlb_size)
# Add PLIC as Bus Slave
self.plicbus = plicbus = wishbone.Interface()
self.cpu_params.update(
i_plicWishbone_CYC = plicbus.cyc,
i_plicWishbone_STB = plicbus.stb,
o_plicWishbone_ACK = plicbus.ack,
i_plicWishbone_WE = plicbus.we,
i_plicWishbone_ADR = plicbus.adr,
o_plicWishbone_DAT_MISO = plicbus.dat_r,
i_plicWishbone_DAT_MOSI = plicbus.dat_w
)
soc.bus.add_slave("plic", self.plicbus, region=soc_region_cls(origin=soc.mem_map.get("plic"), size=0x400000, cached=False))
# Add CLINT as Bus Slave
self.clintbus = clintbus = wishbone.Interface()
self.cpu_params.update(
i_clintWishbone_CYC = clintbus.cyc,
i_clintWishbone_STB = clintbus.stb,
o_clintWishbone_ACK = clintbus.ack,
i_clintWishbone_WE = clintbus.we,
i_clintWishbone_ADR = clintbus.adr,
o_clintWishbone_DAT_MISO = clintbus.dat_r,
i_clintWishbone_DAT_MOSI = clintbus.dat_w,
)
soc.bus.add_slave("clint", clintbus, region=soc_region_cls(origin=soc.mem_map.get("clint"), size=0x10000, cached=False))
def add_memory_buses(self, address_width, data_width):
VexRiscvSMP.litedram_width = data_width
VexRiscvSMP.generate_cluster_name()
from litedram.common import LiteDRAMNativePort
if(not VexRiscvSMP.wishbone_memory):
ibus = LiteDRAMNativePort(mode="both", address_width=32, data_width=VexRiscvSMP.litedram_width)
dbus = LiteDRAMNativePort(mode="both", address_width=32, data_width=VexRiscvSMP.litedram_width)
self.memory_buses.append(ibus)
self.memory_buses.append(dbus)
self.cpu_params.update(
# Instruction Memory Bus (Master).
o_iBridge_dram_cmd_valid = ibus.cmd.valid,
i_iBridge_dram_cmd_ready = ibus.cmd.ready,
o_iBridge_dram_cmd_payload_we = ibus.cmd.we,
o_iBridge_dram_cmd_payload_addr = ibus.cmd.addr,
o_iBridge_dram_wdata_valid = ibus.wdata.valid,
i_iBridge_dram_wdata_ready = ibus.wdata.ready,
o_iBridge_dram_wdata_payload_data = ibus.wdata.data,
o_iBridge_dram_wdata_payload_we = ibus.wdata.we,
i_iBridge_dram_rdata_valid = ibus.rdata.valid,
o_iBridge_dram_rdata_ready = ibus.rdata.ready,
i_iBridge_dram_rdata_payload_data = ibus.rdata.data,
# Data Memory Bus (Master).
o_dBridge_dram_cmd_valid = dbus.cmd.valid,
i_dBridge_dram_cmd_ready = dbus.cmd.ready,
o_dBridge_dram_cmd_payload_we = dbus.cmd.we,
o_dBridge_dram_cmd_payload_addr = dbus.cmd.addr,
o_dBridge_dram_wdata_valid = dbus.wdata.valid,
i_dBridge_dram_wdata_ready = dbus.wdata.ready,
o_dBridge_dram_wdata_payload_data = dbus.wdata.data,
o_dBridge_dram_wdata_payload_we = dbus.wdata.we,
i_dBridge_dram_rdata_valid = dbus.rdata.valid,
o_dBridge_dram_rdata_ready = dbus.rdata.ready,
i_dBridge_dram_rdata_payload_data = dbus.rdata.data,
)
def do_finalize(self):
assert hasattr(self, "reset_address")
self.specials += Instance(self.cluster_name, **self.cpu_params)
# Add Verilog sources
self.add_sources(self.platform)
| 45.15914 | 180 | 0.58879 |
| 5e9d9f9f17099a123562474edb9fb0e2f24798ae | 2,099 | py | Python | tests/data_gen.py | Naillik1/python-schema-registry-client | 2a69fe619cb2409eed1ac82d79048c0f68818b29 | ["MIT"] | null | null | null | tests/data_gen.py | Naillik1/python-schema-registry-client | 2a69fe619cb2409eed1ac82d79048c0f68818b29 | ["MIT"] | null | null | null | tests/data_gen.py | Naillik1/python-schema-registry-client | 2a69fe619cb2409eed1ac82d79048c0f68818b29 | ["MIT"] | null | null | null |
import datetime
import os
import os.path
import faker
fake = faker.Faker()
epoch = datetime.datetime.utcfromtimestamp(0)
AVRO_SCHEMAS_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "avro_schemas")
def unix_time_millis(dt):
return (dt - epoch).total_seconds() * 1000.0
def get_schema_path(fname):
return os.path.join(AVRO_SCHEMAS_DIR, fname)
def load_schema_file(fname):
fname = get_schema_path(fname)
with open(fname) as f:
return f.read()
def create_basic_item(i):
return {"name": fake.first_name(), "number": fake.pyint(max_value=100)}
def create_adv_item(i):
friends = map(create_basic_item, range(1, 3))
family = map(create_basic_item, range(1, 3))
basic = create_basic_item(i)
basic["family"] = dict(map(lambda bi: (bi["name"], bi), family))
basic["friends"] = dict(map(lambda bi: (bi["name"], bi), friends))
return basic
def create_logical_item():
return {"metadata": {"timestamp": fake.past_datetime(), "total": fake.pydecimal(left_digits=2, right_digits=2)}}
def create_nested_schema():
return {
"name": fake.first_name(),
"uid": fake.pyint(min_value=0, max_value=9999, step=1),
"order": {"uid": fake.pyint(min_value=0, max_value=9999, step=1)},
}
BASIC_SCHEMA = load_schema_file(os.path.join(AVRO_SCHEMAS_DIR, "basic_schema.avsc"))
ADVANCED_SCHEMA = load_schema_file(os.path.join(AVRO_SCHEMAS_DIR, "adv_schema.avsc"))
BASIC_ITEMS = map(create_basic_item, range(1, 20))
USER_V1 = load_schema_file(os.path.join(AVRO_SCHEMAS_DIR, "user_v1.avsc"))
USER_V2 = load_schema_file(os.path.join(AVRO_SCHEMAS_DIR, "user_v2.avsc"))
LOGICAL_TYPES_SCHEMA = load_schema_file(os.path.join(AVRO_SCHEMAS_DIR, "logical_types_schema.avsc"))
ADVANCED_ITEMS = map(create_adv_item, range(1, 20))
NESTED_SCHENA = load_schema_file(os.path.join(AVRO_SCHEMAS_DIR, "nested_schema.avsc"))
ORDER_SCHENA = load_schema_file(os.path.join(AVRO_SCHEMAS_DIR, "order_schema.avsc"))
def cleanup(files):
for f in files:
try:
os.remove(f)
except OSError:
pass
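# Minimal usage sketch; runs only when executed directly and assumes the
# avro_schemas directory used by the module-level constants above is present
# next to this file.
if __name__ == "__main__":
    print(create_basic_item(0))
    print(create_adv_item(0))
    print("millis since epoch:", unix_time_millis(datetime.datetime.utcnow()))
    cleanup(["does_not_exist.tmp"])  # missing files are silently ignored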
| 29.985714 | 116 | 0.710815 |
| 74708b81ab15b8e15762e2e62b27fa3334060928 | 40,706 | py | Python | sc_utils/sc.py | manuflores/time_machine | a8c3a7292ad4d8a7707173151eee35d9d79d5170 | ["MIT"] | null | null | null | sc_utils/sc.py | manuflores/time_machine | a8c3a7292ad4d8a7707173151eee35d9d79d5170 | ["MIT"] | null | null | null | sc_utils/sc.py | manuflores/time_machine | a8c3a7292ad4d8a7707173151eee35d9d79d5170 | ["MIT"] | null | null | null |
import scipy.io as sio
import scipy.stats as st
#import scanpy as sc
import numpy as np
#import numba
import pandas as pd
import anndata as ad
from scipy import sparse
#from psutil import virtual_memory
import time
import os
import seaborn as sns
import toolz as tz
import h5py
import tqdm
from sklearn.utils import sparsefuncs
# Correct p-vals using Benjamini-Hochberg procedure
from statsmodels.stats.multitest import multipletests
def check_sparsity(adata):
"Returns the percentage of zeros in dataset."
if not sparse.isspmatrix_csr(adata.X):
adata.X = sparse.csr_matrix(adata.X)
sparsity = (1 - adata.X.data.shape[0] / (adata.n_obs*adata.n_vars))*100
return sparsity
# Make curried to allow kwarg calls on tz.pipe()
@tz.curry
def get_count_stats(
adata,
mt_prefix = None,
ribo_prefix = None)-> ad.AnnData:
"""
Returns an AnnData with extra columns in its `obs` object
for the number of counts per cell `n_counts` (and log10 (counts) ),
    and the number of expressed genes in each cell `n_genes`.
Additionally it can get the fraction of mitochondrial and ribosomal
genes if prefixes are provided.
TODO: Add filtering functionality
Params
------
adata (ad.AnnData)
Input dataset in AnnData format. It should contain a count matrix
(cells x genes) as the `.X` object in the AnnData.
mt_prefix (str, default = 'MT-'):
Prefix to match mitochondrial genes.
For human the prefix is `MT-` and for the mouse is `mt-`.
ribo_prefix(default=None)
For human the prefixes are ('RPS', 'RPL').
Returns
-------
adata (ad.AnnData)
AnnData with columns in the `.obs` dataframe corresponding to
count stats.
"""
if not sparse.isspmatrix_csr(adata.X):
adata.X = sparse.csr_matrix(adata.X)
# Number of transcripts per cell
adata.obs['n_counts'] = adata.X.sum(axis = 1)
adata.obs['log_counts'] = np.log10(adata.obs.n_counts)
# Number of genes with more than one count
adata.obs['n_genes'] = (adata.X > 0).sum(axis = 1)
# Get mitochondrial and ribosomal genes
if mt_prefix is not None:
# Use string methods from pandas to make bool array
mito_genes = adata.var.gene_name.str.startswith(mt_prefix)
if mito_genes.sum()> 1:
# Compute the fraction of mitochondrial genes
adata.obs["frac_mito"] = adata[:, mito_genes].X.A.sum(axis =1) / adata.obs.n_counts
if ribo_prefix is not None:
if isinstance(ribo_prefix, (list, tuple)):
# Initialize bool array
ribo_genes = np.zeros(adata.n_vars, dtype = bool)
# Loop through each prefix and flip to True
# where we get a match.
for prefix in ribo_prefix:
ribo_genes_tmp = adata.var.gene_name.str.startswith(prefix)
ribo_genes +=ribo_genes_tmp
if ribo_genes.sum()> 1:
adata.obs["frac_ribo"] = adata[:, ribo_genes].X.A.sum(axis =1) / adata.obs.n_counts
return adata
# Curry to be able to add arguments in a tz.pipe
@tz.curry
def lognorm_cells(
adata_,
scaling_factor = 1e4,
log = True)-> ad.AnnData:
"""
Cell count normalization as in scanpy.pp.normalize_total.
Expects count matrix in sparse.csr_matrix format.
Each gene's expression value in a given cell is given by :
g_i = \mathrm{ln} ( \frac{g_i \times \beta }{\sum g_i} + 1 )
where β is the scaling factor.
Params
------
adata_ (ad.AnnData):
Count matrix with cell and gene annotations.
scaling_factor(float, default = 1e4)
Factor to scale gene counts to represent the counts in
the cell. If scaling_factor =1e6, the values will
represent counts per million.
log (bool, default = True)
Optional argument to allow for returning the scaled cells
without normalizing.
Returns
-------
adata (ad.AnnData):
Anndata with normalized and log transformed count matrix.
"""
# Make a copy because normalization is done in-place
adata = adata_.copy()
if not sparse.isspmatrix_csr(adata.X):
adata.X = sparse.csr_matrix(adata.X)
# Get total counts per cell from `obs` df
if 'n_counts' in adata.obs.columns:
counts = adata.obs.n_counts.values
else:
counts = adata.X.sum(axis = 1).flatten()
# Convert to numpy matrix to array to be able to flatten
scaled_counts = np.array(counts).flatten() / scaling_factor
# Efficient normalization in-place for sparse matrix
sparsefuncs.inplace_csr_row_scale(adata.X, 1/scaled_counts)
# Call the log1p() method on the csr_matrix
if log:
adata.X = adata.X.log1p()
return adata
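# Worked example of the normalization above: for a cell with raw counts
# [1, 2, 7] (total = 10) and scaling_factor = 1e4, the first gene is scaled to
# 1 * 1e4 / 10 = 1000 and then log-transformed to ln(1001) ~ 6.91.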
# Curry to enable adding arguments in a tz.pipe()
@tz.curry
def cv_filter(
adata,
min_mean = 0.025,
min_cv= 1,
return_highly_variable = False)-> ad.AnnData:
"""
Performs the Coefficient of Variation filtering according
to the Poisson / Binomial counting statistics. The model assumes
the coefficient of variation per gene is given by :
\mathrm{log} (CV) \approx - \frac{1}{2}\mathrm{log} (\mu) + \epsilon
The values will be computed assuming a normalized and
log-scaled count matrix.
Params
------
min_mean (float, default = 0.025).
Lower bound cutoff for the mean of the gene feature.
min_cv (float, default = None)
Lower bound for the coefficient of variation of the
gene feature. Recommended value 1.
return_highly_variable(bool, default = True)
Whether to return an AnnData with the columns corresponding
to only the highly variable genes.
Note: even when running with `return_highly_variable=False`
the function will return genes only with nonzero mean and
nonzero variance, i.e. it will discard those genes.
Returns
-------
adata_filt (ad.AnnData)
        AnnData with coefficient of variation stats on the `var`
dataframe.
"""
# Calculate mean and variance across cells
mean, var = sparsefuncs.mean_variance_axis(adata.X, axis = 0)
# Check if there are nonzero values for the mean or variance
ix_nonzero = list(set(np.nonzero(mean)[0]).intersection(set(np.nonzero(var)[0])))
if len(ix_nonzero) > 0:
# Use numpy-like filtering to select only genes with nonzero entries
adata = adata[:, ix_nonzero].copy()
# Recompute mean and variance of genes across cells
mean, var = sparsefuncs.mean_variance_axis(adata.X, axis = 0)
# Get nonzero mean indices
nz = np.nonzero(mean)
    # Check that every remaining gene has a nonzero mean
assert adata.n_vars == nz[0].shape[0]
std_dev = np.sqrt(var)
# Element-wise coefficient of variation
cv = std_dev / mean
log_cv = np.log(cv)
log_mean = np.log(mean)
df_gene_stats = pd.DataFrame(
np.vstack([mean, log_mean, var, cv, log_cv]).T,
columns=["mean", "log_mean", "var", "cv", "log_cv"],
index = adata.var.index
)
new_adata_var = pd.concat(
[adata.var, df_gene_stats],
axis = 1
)
adata.var = new_adata_var
slope, intercept, r, pval, stderr = st.linregress(log_mean, log_cv)
# Check that slope is approx -1/2
print(f'The slope of the model is {np.round(slope,3)}.')
poisson_prediction_cv = slope*log_mean + intercept
# Binary array of highly variable genes
gene_sel = log_cv > poisson_prediction_cv
adata.var['highly_variable'] = gene_sel.astype(int)
    if min_mean is not None and min_cv is not None:
adata_filt = adata[:,((adata.var.highly_variable == True)&\
(adata.var['mean'] > min_mean)&\
(adata.var['cv'] > min_cv))].copy()
else:
adata_filt = adata[:, adata.var.highly_variable == True].copy()
if return_highly_variable:
return adata_filt
else:
return adata
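# Sketch of how the curried functions above compose in a toolz pipe,
# assuming `adata` is a human dataset loaded in the workspace:
#
#   adata_hv = tz.pipe(
#       adata,
#       get_count_stats(mt_prefix='MT-', ribo_prefix=('RPS', 'RPL')),
#       lognorm_cells(scaling_factor=1e4),
#       cv_filter(min_mean=0.025, min_cv=1, return_highly_variable=True),
#   )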
def nmf_wrapper_sc(
data:np.array,
marker_dict,
n_clusters = 10,
n_enriched_feats=1,
feature_names=None,
)->tuple:
"""
Python wrapper to implement NMF algorithm.
It returns the W and H matrices, the cluster_labels per sample,
and the top enriched features per cluster.
    Assumes that scikit-learn is installed (the
    sklearn.decomposition.NMF class is imported inside
    the function).
Params
-------
data: (pd.DataFrame or np.array)
Dataset to be decomposed. All datapoints need to
be non-negative.
marker_dict(dict)
feature_names (array-like):
Name of the columns of the dataset.
This is used to compute the feature enrichment.
n_enriched_feats (int):
number of top enriched features to extract from
each cluster. In the scRNAseq case, it amounts to get
the top genes in each cluster.
n_clusters (int):
Number of components to make the matrix factorization.
Returns
--------
nmf_W (np.ndarray):
Cluster matrix, it has (n_samples, n_clusters) shape.
nmf_H (np.ndarray):
Feature coefficient matrix, it has (n_clusters, n_feats)
cluster_labels (list):
Cluster label per sample.
enriched_feats (list or list of lists):
List of enriched features per cluster.
"""
from sklearn.decomposition import NMF
# Get the feature_names
    if feature_names is None and isinstance(data, pd.DataFrame):
        feature_names = data.columns.to_list()
    elif feature_names is None and isinstance(data, np.ndarray):
        print("No feature names provided, feature enrichment will not be annotated.")
    # Fit the NMF model and get the W and H matrices
    nmf = NMF(n_components=n_clusters).fit(data)
    nmf_W = nmf.transform(data)
nmf_H = nmf.components_
# Initialize list to store cluster labels
cluster_labels = []
# Store the number of samples (rows in the original dataset)
n_samples = nmf_W.shape[0]
# Get sample cluster labels iterating over W matrix's rows
for i in range(n_samples):
# Get the index for which the value is maximal.
cluster_labels.append(nmf_W[i].argmax())
# Initialize to store enriched features
enriched_feats = []
# Get features cluster coefficients
# iterates over the H rows
for cluster_idx, cluster in enumerate(nmf_H):
top_feats = [
marker_dict[feature_names[i]] for i in cluster.argsort()[: -n_enriched_feats - 1 : -1]
]
enriched_feats.append(top_feats)
if n_enriched_feats == 1:
enriched_feats = np.array(enriched_feats).flatten()
return nmf_W, nmf_H, cluster_labels, enriched_feats
def safe_gene_selection(
adata,
input_list,
gene_colname = 'gene_name',
#keep_order=False
)-> ad.AnnData:
"""
Returns a new adata with a subset of query genes.
TODO: Handle case when we want to keep the query list.
Note: It will only return the genes that are in the dataset.
If any of the query genes are not in the dataset, the gene names
will be dismissed. If you're not too sure of the exact gene names
check the `df.gene_colname.str.contains()` or the
`df.gene_colname.str.startswith()` function.
Params
------
adata(ad.AnnData)
Dataset to select from.
input_list(array-like)
Query list with gene names.
gene_colname (str, default = 'gene_name')
Name of the column in the .var object from which to
make the query against.
Returns
-------
new_adata (ad.AnnData)
Subset of original anndata containg only query genes.
Example
-------
# Initalize dummy list and shuffle it
gene_names = list('ABCDEFGHIJ')
rng = np.random.default_rng(seed = 9836)
gene_names = rng.permutation(gene_names)
print(gene_names)
# Create adata with random 5 cell 10 gene count matrix
a = ad.AnnData(
X = np.random.random((5, 10)),
var= pd.DataFrame(gene_names, columns = ['gene_name'])
)
my_list = ['A', 'C', 'B','D']
    ada_new = sc.safe_gene_selection(a, my_list)
    print(ada_new.var.gene_name.values)
>>> array(['A', 'B', 'C', 'D'], dtype=object)
"""
# Gets the indices of the rows contained in my_list using bool array
isin_indexer = adata.var[gene_colname].isin(input_list).values.nonzero()[0]
# Returns the indices that sort the values
# selected with the indexer array
new_ixs = np.argsort(adata.var[gene_colname].values[isin_indexer])
isin_list_sorted_ixs = isin_indexer[new_ixs]
adata_new = adata[:, isin_list_sorted_ixs].copy()
return adata_new
def make_hist_from_latent(data, sample_id, cols_list, channels,
resolution, export_path, export = True, **kwargs):
"""
Returns an n-d histogram from the latent space representation
of a sample.
Params
------
data (pd.DataFrame)
Dataframe containing latent representation coordinates
of a sample.
sample_id (str)
Name of the sample to encode.
cols_list (list)
List of the columns to extract the representation from.
channels (int)
Size of the first dimension which will serve as a "channel"
dimension.
resolution (int)
Size of the second and third dimension.
export (bool, default = True )
Whether to export the resulting histogram as an image.
**kwargs
Keyword arguments passed to the np.histogramdd function.
Returns
-------
hist(array-like)
3D histogram of the sample's latent space representation.
"""
# Extract latent space to compute bounds
data_ = data[cols_list]
# Round to first decimal
mins_ = np.round(data_.min() - 0.01, 1)
maxs_ = np.round(data_.max() - 0.01, 1)
print(f'maxs: {maxs_}, mins: {mins_}')
bounds = list(zip(mins_, maxs_))
# Get the data for the corresponding
samples = data[data['sample_id_unique'] == sample_id][cols_list].values
print(f'Number of cells in dataset: {samples.shape}' )
# Get n-d hist of data
hist, edges = np.histogramdd(
samples,
bins = (channels, resolution, resolution),
range = bounds,
#normed = True,
**kwargs
)
if export:
sample_id = sample_id.split(' ')[0].split('/')[0]
fname = export_path + 'latent_' + sample_id + '.npy'
np.save(fname, hist)
return fname, hist
else:
return hist
def simple_bootstrap_df(df, n_boostrap = 10, reset_index = False):
    """
    Returns a dataframe with `n_boostrap` samples drawn with replacement from
    `df`, appended to the original data. The original rows are labeled as
    bootstrap sample zero in the `bootstrap_sample` column.
    """
    n_cells = df.shape[0]
    df_bootstrap = df.copy()
    # Define original data as bootstrap sample zero
    df_bootstrap['bootstrap_sample'] = 0
    for i in range(1, n_boostrap + 1):
        # Sample n_cells rows with replacement
        bootstrap_sample = df.sample(n_cells, replace = True)
        bootstrap_sample['bootstrap_sample'] = i
        df_bootstrap = pd.concat([df_bootstrap, bootstrap_sample])
if reset_index:
df_bootstrap = df_bootstrap.reset_index(drop = True)
return df_bootstrap
def stratified_bootstrap_df(df, col_stratify = 'cell_ontology_class',
n_bootstrap = 10, reset_index = False, verbose = True):
"""
Returns a new dataframe by sampling n_bootstrap datasets with replacement from
the original dataframe df. In this version it also keeps the relative proportion of
datapoints on col_stratify.
Params
-------
df ()
col_stratify (str, default = 'cell_ontology_class')
    n_bootstrap(int, default = 10)
Number of bootstrap samples.
reset_index(bool, default = True)
Whether to reset the index of the new dataset.
Returns
-------
df_bootstrap(pd.DataFrame)
Dataset with bootstrapped samples. It contains the original dataset.
"""
df_bootstrap = df.copy()
# Name the original dataset bs sample zero
df_bootstrap['bootstrap_sample'] = 0
for i in range(1, n_bootstrap + 1):
sampling_ix = (
df.groupby(col_stratify)
.apply(lambda group_df: group_df.sample(group_df.shape[0], replace=True))
.index.get_level_values(1)
)
bootstrap_sample = df.loc[sampling_ix, :]
bootstrap_sample['bootstrap_sample'] = i
df_bootstrap = pd.concat([df_bootstrap, bootstrap_sample])
if reset_index:
df_bootstrap = df_bootstrap.reset_index(drop = True)
if verbose:
n_non_sampled = len(set(df.index) - set(df_bootstrap[df_bootstrap['bootstrap_sample'] != 0].index))
print(f'Number of indices not sampled: {n_non_sampled}')
return df_bootstrap
def get_cell_type_props(df, var_id, agg_var = 'age'):
"""
Returns a dataframe with cell type proportions across the Tabula Muris' dataset
tissues, for a specified age section.
Params
------
df (pd.DataFrame)
        Summary dataframe of Tabula Muris Senis. It should contain the following
columns = ['age', 'age_code', 'tissue', 'cell_ontology_class']
agg_var (str)
Particular variable to select for.
        Commonly used examples are age and mouse.id.
Returns
-------
normalized_tissue(pd.DataFrame)
Tissue with cell type proportions
"""
sub_df = df[df[agg_var] == var_id]
tissue_counts = (
sub_df.groupby(["tissue", "cell_ontology_class"])
.size()
.to_frame()
.reset_index()
.rename(columns={0: "counts"})
)
pivot = pd.pivot_table(
data = tissue_counts,
index ="tissue",
columns = 'cell_ontology_class',
fill_value = 0
)
# Try alternative
#pivot = sub_df.groupby(["tissue", "cell_ontology_class"]).size().unstack()
tissue_total = pivot.sum(axis = 1)
normalized_tissue = pivot.div(tissue_total, axis = 'rows')
normalized_tissue = normalized_tissue.droplevel([None], axis = 1)
# Annotate with age column
normalized_tissue =normalized_tissue.T
normalized_tissue[agg_var] = var_id
normalized_tissue = normalized_tissue.fillna(0)
return normalized_tissue
def get_single_tissue(df_tissues, tissue_name, agg_var, drop_zeros = True):
"""
Returns cell type proportions of a single tissue.
Params
------
df_tissues(pd.DataFrame)
Pivoted dataframe of (cell_types, tissues) shape.
tissue_name (str)
Name of the tissue to select
agg_var (str)
Particular variable to select for.
        Commonly used examples are age and mouse.id.
Returns
-------
tissue (pd.DataFrame)
Subdataframe of a single tissue of shape
"""
tissue = pd.pivot_table(
data = df_tissues[[tissue_name, agg_var]].reset_index(),
index = 'cell_ontology_class',
columns = agg_var,
aggfunc= 'mean',
fill_value = 0
)
# Drop the multiindex name that corresponds to the tissue name
tissue = tissue.droplevel(None, axis = 1)
if drop_zeros:
# Eliminate all cell types with no counts in tissue
tissue = tissue[tissue.sum(axis = 1) > 0]
# Drop the individuals with no counts for given tissue
tissue = tissue.loc[:, (tissue !=0).any(axis = 0)]
# Renormalize due to aggregation effects
if agg_var == 'age':
tissue = tissue / tissue.sum(axis = 0)
return tissue
def read_cancer_adata(
path,
h5_fname,
meta_fname)->(ad.AnnData, pd.DataFrame):
"""
Load and convert .h5 file to AnnData from the publication:
https://www.nature.com/articles/s41591-020-0844-1
"""
# Load metadata file
df_meta = pd.read_csv(path + meta_fname)
h5_file = h5py.File(path + h5_fname)
dset_name = list(h5_file.keys())[0]
data = h5_file[dset_name]["data"][()]
indices = h5_file[dset_name]["indices"][()]
indptr = h5_file[dset_name]["indptr"][()]
shape = h5_file[dset_name]["shape"][()]
gene_names = h5_file[dset_name]["gene_names"][()]
gene_names = [x.decode('ascii') for x in gene_names]
barcodes = h5_file[dset_name]["barcodes"][()]
barcodes = [x.decode('ascii') for x in barcodes]
adata = ad.AnnData(
X = sparse.csc_matrix(
(data, indices, indptr),
shape = shape
).T,
#obs = df_meta,
var= pd.DataFrame(gene_names, columns = ['gene_names'])
)
# Check if count matrix is in csr format
#assert sparse.isspmatrix_csr(adata.X)
return adata, df_meta
def load_pbmc_markers():
pbmc_cell_type_markers = {
'IL2RA' :'T cell',
'TNFRSF18' :'T cell',
'CD3D': 'T cell',
'BATF': 'T helper',
'IL7R': 'T helper',
'CD4': 'T helper',
'GNLY': 'NK T cell',
'NKG7': 'NK T cell',
'CD56': 'NK T cell',
'CCL5': 'Cytotoxic T cell',
'CD8A': 'Cytotoxic T cell',
'CD16': 'Cytotoxic T cell',
'MS4A1': 'B cell',
'CD79A': 'B cell',
'LYZ': 'Monocyte',
'FCGR3A': 'Monocyte',
'MS4A7': 'Monocyte',
'CD163': 'Macrophage',
'GBP1': 'Macrophage',
'FCER1A': 'Dendritic',
'LAD1': 'Dendritic',
'LAMP3': 'Dendritic',
'FLT3': 'Dendritic',
'CST3': 'Dendritic',
'PPBP': 'Megakaryocytes',
}
return pbmc_cell_type_markers
def load_cell_type_collapser():
cell_type_collapser = {
'B cell': 'B cell',
'T cell': 'T cell',
'Cytotoxic T cell': 'T cell',
'T helper': 'T cell',
'NK T cell': 'T cell',
'Macrophage': 'Monocyte',
'Monocyte': 'Monocyte',
'Dendritic': 'Monocyte',
'Megakaryocytes': 'Monocyte'
}
return cell_type_collapser
def confusion_matrix(pred_labels, true_labels):
"""
Returns a confusion matrix from a multiclass classification
set of labels.
Params
------
pred_labels (array-like):
List of labels as predicted by a classification algorithm.
true_labels (array-like):
List of ground truth labels.
Returns
-------
conf_mat (array-like):
Confusion matrix.
"""
n_labels = int(max(np.max(pred_labels), np.max(true_labels)) + 1)
conf_mat = np.zeros(shape = (n_labels, n_labels))
for (i, j) in zip(pred_labels, true_labels):
conf_mat[i,j] +=1
return conf_mat
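# Small worked example: confusion_matrix([0, 1, 1], [0, 1, 0]) returns
# [[1, 0], [1, 1]], where rows index predicted labels and columns index
# true labels.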
def element_wise_entropy(px):
"""
    Returns a numpy array with element-wise entropy calculated as -p_i*log_2(p_i).
Params
------
px (np.array)
Array of individual probabilities, i.e. a probability vector or distribution.
Returns
-------
entropy (np.array)
Array of element-wise entropies.
"""
if isinstance(px, list):
px = np.array(px)
# Make a copy of input array
entropy = px.copy()
# Get indices of nonzero probability values
nz = np.nonzero(entropy)
# Compute -pi*log_2(p_i) element-wise
entropy[nz] *= - np.log2(entropy[nz])
return entropy
def entropy(ps):
"Returns the entropy of a probability distribution `ps`."
# Get nonzero indices
nz = np.nonzero(ps)
# Compute entropy for nonzero indices
entropy = np.sum(-ps[nz]*np.log2(ps[nz]))
return entropy
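# Quick checks of the two helpers above:
#   element_wise_entropy([0.5, 0.5])  -> array([0.5, 0.5]) (bits per term)
#   entropy(np.array([0.5, 0.5]))     -> 1.0
#   entropy(np.array([1.0, 0.0]))     -> 0.0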
# EXPERIMENTAL: gene analysis
def score_cells_percentile(val, percs, score_hi=1, score_low=-1):
"""
Returns a score given value of a single cell.
Cells with score = -1 will be cells in low prototype (cells below the
bottom percentile). Conversely, cells with a score =1, will be
the top prototype.
Params
------
val (float):
Value of the cell at given dimension.
percs (tuple):
Tuple of top and bottom percentiles to compare with.
Returns
-------
score(int)
Score of the cell.
"""
low_perc, hi_perc = percs
if val > hi_perc:
score = score_hi
elif val < low_perc:
score = score_low
else:
score = 0
return score
def score_cells_along_component(df_proj, latent_dim, percs = (10, 90)):
"""
Returns an array of scores for each of the cells along
a latent dimension.
Params
------
latent_dim (array-like)
Array of values for each cell in the latent dimension or component.
percs (tuple, default= (10,90))
Low and bottom percentiles to compute.
Returns
-------
scores (array-like)
List of scores for each cell.
"""
low_perc, hi_perc = np.percentile(df_proj[latent_dim], percs)
scores = [score_cells_percentile(p, (low_perc, hi_perc)) for p in df_proj[latent_dim]]
return scores
# def get_scanpy_deg_report_df(adata, groups = ('-1','1')):
# """
# Returns a report dataframe of differentially expressed genes.
# It expects an adata with a report dictionary from the output of
# scanpy.tl.rank_genes_groups().
# Params
# ------
# adata (ad.AnnData)
# AnnData with rank_genes_groups dictionary in `.uns` object.
# Ideally, this adata would only contain "prototype" cells,
# that is, the cells on the extremes of a given component.
# groups (tuple, default = (-1,1))
# Tuple of groups for which to extract the DEG results.
# """
# deg_result_dict = adata.uns['rank_genes_groups']
# # Make a report dataframe.
# df_report = pd.DataFrame()
# for g in groups:
# df = pd.DataFrame(
# np.vstack(
# [deg_result_dict["names"][g],
# deg_result_dict["logfoldchanges"][g],
# deg_result_dict["pvals_adj"][g]]
# ).T,
# columns=["gene_name", "log_fc", "pval_adj"],
# )
# df["group"] = g
# df_report = pd.concat([df_report, df])
# return df_report
# Curry to enable passing arguments to .apply() method
@tz.curry
def is_significant(l1_score, thresh_over = -.8, thresh_under = .5):
if l1_score < thresh_over or l1_score > thresh_under:
return 1
else:
return 0
def over_under_exp(l1_score):
"To be used after extracting significant genes."
if l1_score < 0 :
return 'over'
elif l1_score > 0:
return 'under'
else:
return None
def deg_analysis(
adata,
groupby_col = 'scores',
groups = [-1,1],
thresh_under= .5,
thresh_over = -.5,
return_all_genes = False
)->pd.DataFrame:
"""
TO-DO: Enable parallelism
Returns a dataframe containing the report of differential gene expression (DEG) analysis
between two groups of cells. It tests the hypothesis that all genes in group 1
have higher values (are overexpressed) than genes in group -1 using non-parametric tests.
It assumes genes are log-normed and expects that the AnnData has a column that indicates
the groups of cells to make the analysis on, in the `obs` DataFrame.
The function uses the Kolmogorov-Smirnov (KS) 2-sample test on the ECDFs and the L1-norm
on the histogram of the gene distributions, respectively. We use the L1 norm to assign a
significance of over or underexpression. We consider a L1 score < -0.5 for a gene to be
overexpressed in group 1 and a L1 score of > 0.5 to be underexpressed in group 1.
Params
------
adata (ad.AnnData)
AnnData with an .obs dataframe containing a column to select the groups.
group_by (str, default = 'scores')
Column that indicates the groups to make the test on.
groups (list, default = (-1,1))
Name of the groups.
Returns
-------
df_report (pd.DataFrame)
Notes
-----
The KS runs the following hypothesis test:
* *H0*: The ECDF of gene$_i$ in (group 1) $F$ is equal
to the distribution of (group -1) $G$, i.e. $F(u) = G(u)$ for all quantiles $u$.
* *H1* : The distribution $F$ for gene$_i$ has **higher values** in (group 1) compared
to (group -1). Having higher values implies that for a given quantile *u*,
the values of the CDF of (group 1) will be lower than those of the non-lactating state,
that is $F(u) <= G(u)$ for all quantiles $u$.
The L1 norm is computed as reported in [Chen *et al* (2020)]
(https://www.pnas.org/content/117/46/28784)
"""
df_report = pd.DataFrame()
# Get gene name from the `.var` dataframe
gene_names = adata.var.gene_name.values
# Unpack groups
gp_lo, gp_hi = groups
# Loop through all genes
for ix in tqdm.tqdm(range(adata.n_vars)):
gene_name = gene_names[ix]
# Extract distributions as arrays
gene_distro_lo = adata[adata.obs[groupby_col]== gp_lo, ix].X.A.flatten()
gene_distro_hi = adata[adata.obs[groupby_col]== gp_hi, ix].X.A.flatten()
# Run KS test
# Second element in results tuple is P-value
ks_test_pval = st.ks_2samp(
gene_distro_lo,
gene_distro_hi,
alternative = 'greater',
#mode = 'exact'
)[1]
# Compute L1 norm
l1_score = l1_norm(gene_distro_hi, gene_distro_lo)
# log2 fold-change of means
log_fold_change = np.log2(gene_distro_hi.mean() / gene_distro_lo.mean())
# Wrap into a dataframe
#pd.DataFrame(
df = {
'gene_name': gene_name,
'ks_pval' : ks_test_pval,
'l1_score' : l1_score,
'log_fc': log_fold_change
}
#)
df_report = df_report.append(df, ignore_index = True)
df_report.reset_index(drop =True, inplace = True)
df_report = df_report.sort_values(by = ['l1_score'], ascending = True)
_, pvals_corr, _, _ = multipletests(df_report.ks_pval.values, method = 'fdr_bh')
df_report['ks_pval_BH'] = pvals_corr
if return_all_genes:
return df_report
else:
#
df_report['is_signif'] = df_report.l1_score.apply(
is_significant(thresh_over = thresh_over, thresh_under = thresh_under)
)
df_deg = df_report[df_report['is_signif']==1]
df_deg['deg_type'] = df_deg.l1_score.apply(over_under_exp)
return df_deg
def deg_test(
adata, annot_cols, groupby = 'scores', groups = [-1,1], return_melted = True
)->(pd.DataFrame):
"""
Runs DEG analysis and return dataframes for visualization.
Params
------
adata (ad.AnnData)
Annotated count matrix in AnnData format.
annot_cols (list)
Subset of the columns in the adata `obs` dataframe
to use for visualization.
groupby (str, default = 'scores')
Column that indicates the groups to make the test on.
groups(list, default = [-1,1])
Name of the groups.
return_melted(bool, default = True)
If set to True, it returns a melted version of the df_viz
dataframe, useful for plotting distribution plots (e.g. boxplots,
violinplots) of the differentially expressed genes.
Returns
-------
df_deg (pd.DataFrame)
Report dataframe from DEG analysis.
df_viz (pd.DataFrame)
DataFrame to visualize scatterplots colored by gene counts,
or heatmaps.
df_distro_viz (pd.DataFrame, optional)
Tidy dataframe to visualize distributions of genes.
Example
-------
### Assuming an anndata is loaded in workspace
# Run DEG analysis and plot overexpressed gene distributions.
df_deg, df_viz, df_distro_viz = sc.deg_test(adata, annot_cols)
df_viz_prototype = df_distro_viz[df_distro_viz['scores'] != 0]
plt.figure(figsize = (4, 9))
sns.violinplot(
data = df_viz_prototype[df_viz_prototype['deg_type'] == 'over'],
y = 'gene_name',
x = 'log(counts)',
hue = 'classification column',
palette= viz.get_binary_palettes()[1],
split = True,
scale = 'width',
inner = 'quartile',
cut = 0
)
"""
df_deg = deg_analysis(adata, groupby_col = groupby, groups = groups)
    overexp_genes = df_deg[df_deg['deg_type'] == 'over'].gene_name.to_list()
    underexp_genes = df_deg[df_deg['deg_type'] == 'under'].gene_name.to_list()
deg_gene_list = overexp_genes + underexp_genes
# Get a subset of the original adata containing the DE genes only.
de_genes_adata = safe_gene_selection(adata, deg_gene_list)
# Make DataFrame for visualization
de_genes_df = pd.DataFrame(
de_genes_adata.X.A,
columns = de_genes_adata.var.gene_name
)
# Concatenate annotation with gene values
# This dataset serves to make scatterplots using
# the gene expression values to color the dots
df_viz = pd.concat(
[adata.obs[annot_cols],
de_genes_df.set_index(adata.obs.index)],
axis = 1
)
if return_melted:
# Melt to visualize the distribution of genes
# across groups, e.g. violinplots
df_distro_viz = pd.melt(
df_viz,
id_vars = annot_cols,
value_vars = de_genes_df.columns.to_list(),
var_name = 'gene_name',
value_name = 'log(counts)'
)
# Check that melt frame is of length n_de_genes * n_cells
assert df_distro_viz.shape[0] == len(deg_gene_list)*adata.n_obs
# Add column indicating whether gene is over or underexpressed
# in group 1
gene_dge_type_mapper= dict(df_deg[['gene_name', 'deg_type']].values)
df_distro_viz['deg_type'] = df_distro_viz.gene_name.map(
gene_dge_type_mapper
)
return df_deg, df_viz, df_distro_viz
else:
return df_deg, df_viz
def annot_genes(gene, gene_sets_tuple):
"""
    Helper function to annotate whether a given gene belongs to one of the
    prototype-enriched gene sets.
    Returns -1 if the gene is in the bottom prototype set, 1 if it is in the
    top prototype set, and 0 otherwise.
"""
gene_set_low, gene_set_hi = gene_sets_tuple
if gene in gene_set_low:
y = -1
elif gene in gene_set_hi:
y = 1
else:
y = 0
return y
def fisher_enrichment_test(
df_annot,
annotation,
group,
group_colname = 'cluster_labels',
n_top = 5
)->pd.DataFrame:
"""
    Returns a report dataframe with the `n_top` most frequent categories tested
    for enrichment in a given subset of data. This function is especially suited
    for statistical enrichment tests after clustering.
    Params
    ------
    df_annot (pd.DataFrame)
        Annotated dataframe containing the `annotation` column
        and a `group_colname` column.
    annotation (str)
        Annotation to run the enrichment test on. In the case
        of gene set enrichment this could be a Gene Ontology
        or COG annotation.
    group (int or str)
        Cluster (or, in general, group of data points) to test.
    group_colname (str, default = 'cluster_labels')
        Name of the group/cluster column in the df_annot dataframe.
    n_top (int, default = 5)
        Number of top categories (by frequency in the group) to test.
Returns
-------
df_report (pd.DataFrame)
Report dataframe with pvalues and annotation names.
"""
# Get subset of completely annotated genes
df_test = df_annot[pd.notnull(df_annot[annotation])]
# Number of genes with valid annotation
M = df_test.shape[0]
# Extract data for given cluster
df_clus = df_test[df_test[group_colname] == group]
# Get n_top categories to test (defaults to 5)
cats = df_clus[annotation].value_counts().head(n_top).index.to_list()
# Number of genes in the cluster (sample size)
N = df_clus.shape[0]
# Initialize pvalue array
pvals = np.empty(len(cats))
# Loop through the top categories
for i, cat in enumerate(cats):
df_cat = df_test[df_test[annotation] == cat]
# Total number of genes that map to given category (total number of white balls)
n = df_cat.shape[0]
# Number of genes inside cluster that map to given category (number of white balls in sample)
x = df_clus[df_clus[annotation] == cat].shape[0]
# Sweep through the probabilities from x to n
pmfs = st.hypergeom.pmf(k = np.arange(x, n + 1), N = N, n = n, M = M)
# Compute pval
pvals[i] = pmfs.sum()
# Save results
df_report = pd.DataFrame(
{'categories': cats, 'pval': pvals}
)
df_report['group'] = group
df_report['annot'] = annotation
return df_report
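# --- Hedged illustration (added; not part of the original module) ---
# Toy re-derivation of the upper-tail hypergeometric p-value computed inside
# fisher_enrichment_test above. The counts are invented; scipy's survival
# function gives the same tail probability as summing the pmf from x to n.
def _demo_hypergeom_tail():
    M, N, n, x = 1000, 80, 120, 20  # population, sample size, white balls, hits
    tail_from_pmf = st.hypergeom.pmf(k = np.arange(x, n + 1), M = M, n = n, N = N).sum()
    tail_from_sf = st.hypergeom.sf(x - 1, M, n, N)
    assert np.isclose(tail_from_pmf, tail_from_sf)
    return tail_from_pmf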
def run_fisher_test_go(
df_report_deg,
path,
groupby,
    clus_col_name = 'deg_type',
groups = [-1,1],
n_top = 5,
organism = 'human'
)->(pd.DataFrame, pd.DataFrame):
"""
Returns (1) a dataframe with enriched Gene Ontology terms
for a set of differentially expressed genes, and
(2) a more detailed dataframe of the Gene Ontology terms with their GO ID,
and whether or not they are Transcription Factors.
Params
------
    df_report_deg (pd.DataFrame)
        Differential expression (DEG) report dataframe.
    path (str)
        Path to the drug screen dataset.
    groupby (str)
        Column of df_report_deg indicating the group each gene belongs to.
    groups (list, default = [-1, 1])
        Names of the two groups (bottom and top prototypes).
    clus_col_name (str, default = 'deg_type')
        Grouping column passed to fisher_enrichment_test; it is created below
        from the DEG gene sets.
    n_top (int, default = 5)
        Number of categories on which to compute the Fisher enrichment test.
    organism (str, default = 'human')
        Name of the organism. Currently handles ['human', 'mouse'].
Returns
-------
df_enrichment_report(pd.DataFrame)
Report of enriched GO terms with corresponding pvals.
df_go_red (pd.DataFrame)
Dataframe of enriched genes with their corresponding
GO biological process.
"""
# path = 'path_to_drug_screen'
# Unpack groups
gp_lo, gp_hi = groups
#Load Gene Ontology dataset (from QuickGO annotation)
df_go = pd.read_csv(path + 'go/go_lite_' + organism +'.csv')
# Get gene names of DEG genes
genes_disease = df_report_deg[df_report_deg[groupby]== gp_lo]['gene_name'].values
genes_healthy = df_report_deg[df_report_deg[groupby] == gp_hi]['gene_name'].values
gene_sets = (genes_disease, genes_healthy)
# Annotate genes as prototypes:
# These annotations will help the Fisher enrichment test function
df_go['deg_type'] = [annot_genes(g, gene_sets) for g in df_go.gene_name.values]
# Compute FET for bottom and top prototypes
df_enrichment_lo_prot = fisher_enrichment_test(
df_annot = df_go,
group_colname = clus_col_name,
annotation = 'GO NAME',
group = -1,
n_top = n_top
)
df_enrichment_top_prot = fisher_enrichment_test(
df_annot = df_go,
group_colname = clus_col_name,
annotation = 'GO NAME',
group = 1,
n_top = n_top
)
df_enrichment_report = pd.concat([df_enrichment_lo_prot,
df_enrichment_top_prot])
df_enrichment_report = df_enrichment_report.\
sort_values(by = ['group', 'pval']).reset_index(drop = True)
#df_enrichment_report = df_enrichment_report.rename(columns = {'clusters':'prototype'})
# Get GO categories for differentially expressed genes
# i.e. in prototypes -1 or 1 (not in 0).
try:
df_go_red = df_go[df_go['deg_type'].isin([-1,1])]
except:
df_go_red = df_go[df_go['deg_type'].isin(['-1','1'])]
# Load Transcription Factor gene names
path_to_tfs = path + '../trns/' + organism + '_trn/'+ organism + '_tfs.csv'
tf = pd.read_csv(path_to_tfs)
# Annotate if DEG genes are TFs
df_go_red['is_tf'] = df_go_red.gene_name.apply(
lambda x: 1 if x in tf['gene_name'].unique() else 0
)
return df_enrichment_report, df_go_red
def ecdf(x, plot = False, label = None)->(np.array, np.array):
'''
    Returns the ECDF of a 1-D array and optionally plots it.
Params
------
x(array or list)
Input array, distribution of a random variable.
plot (bool, default= False)
If True return the plot of the ECDF
label(str)
Label for the plot
Returns
-------
x_sorted : sorted x array.
ecdf : array containing the ECDF of x.
'''
x_sorted = np.sort(x)
ecdf = np.linspace(0, 1, len(x_sorted))
if label is None and plot is True:
plt.scatter(x_sorted, ecdf, alpha = 0.7)
elif label is not None and plot is True:
plt.scatter(x_sorted, ecdf, alpha = 0.7, label = label)
else:
pass
return x_sorted, ecdf
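# --- Hedged usage sketch (added; not part of the original module) ---
# Calls ecdf() on toy data without plotting; only the returned arrays are used.
def _demo_ecdf():
    x_sorted, cdf_values = ecdf(np.random.normal(size = 200), plot = False)
    assert x_sorted.shape == cdf_values.shape
    return x_sorted, cdf_values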
def freedman_diaconis_rule(arr):
"""
Calculates the number of bins for a histogram using the Freedman-Diaconis Rule.
Modified from https://github.com/justinbois/bebi103/blob/master/bebi103/viz.py
"""
h = 2* (np.percentile(arr, q=75) - np.percentile(arr, q = 25))/ np.cbrt(len(arr))
if h == 0.0:
n_bins = 3
else:
        n_bins = int(np.ceil((arr.max() - arr.min()) / h))
return n_bins
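# --- Hedged usage sketch (added; not part of the original module) ---
# Freedman-Diaconis bin count for a toy sample; for 1000 standard-normal draws
# this typically lands in the low tens of bins.
def _demo_fd_bins():
    return freedman_diaconis_rule(np.random.normal(size = 1000))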
def l1_norm(arr1, arr2):
'''
Compute the L1-norm between two histograms.
It uses the Freedman-Diaconis criterion to determine the number of bins.
It will be positive if the mean(arr2) > mean(arr1) following the convention
from PopAlign.
Modified from https://github.com/thomsonlab/popalign/blob/master/popalign/popalign.py
Parameters
----------
arr1 (array-like)
Distribution of gene for population 1.
arr2 (array-like)
Distribution of gene for population 2.
Returns
-------
l1_score(float)
L1 norm between normalized histograms of gene distributions.
Example
-------
import numpy as np
from sc_utils import sc
x = np.random.normal(loc = 0, size = 100)
y = np.random.normal(loc = 3, size = 100)
sc.l1_norm(x, y)
    # -> approximately 1.46 (value depends on the random draws)
'''
if len(arr1) == len(arr2):
nbins = freedman_diaconis_rule(arr1)
else:
nbins_1 = freedman_diaconis_rule(arr1)
nbins_2 = freedman_diaconis_rule(arr2)
nbins = int((nbins_1 + nbins_2)/2)
max1, max2 = np.max(arr1), np.max(arr2) # get max values from the two subpopulations
max_ = max(max1,max2) # get max value to define histogram range
if max_ == 0:
return 0
else:
b1, be1 = np.histogram(arr1, bins=nbins, range=(0,max_)) # compute histogram bars
b2, be2 = np.histogram(arr2, bins=nbins, range=(0,max_)) # compute histogram bars
b1 = b1/len(arr1) # scale bin values
b2 = b2/len(arr2) # scale bin values
if arr1.mean()>=arr2.mean(): # sign l1-norm value based on mean difference
l1_score = -np.linalg.norm(b1-b2, ord=1)
return l1_score
else:
l1_score = np.linalg.norm(b1-b2, ord=1)
return l1_score
| 26.211204
| 101
| 0.662163
|
745b201f2d653134665cbef21d2a264d92288e7f
| 2,265
|
py
|
Python
|
kucoin/exceptions.py
|
nielskool/python-kucoin
|
a511b2b2dd157d23d61b3df6af004ddd8b30c45c
|
[
"MIT"
] | null | null | null |
kucoin/exceptions.py
|
nielskool/python-kucoin
|
a511b2b2dd157d23d61b3df6af004ddd8b30c45c
|
[
"MIT"
] | null | null | null |
kucoin/exceptions.py
|
nielskool/python-kucoin
|
a511b2b2dd157d23d61b3df6af004ddd8b30c45c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import json
# System error codes
# Code Meaning
# 403000 No Auth -- The requested is forbidden for current API key.
# 404000 Url Not Found -- The request resource could not be found
# 400100 Parameter Error -- You tried to access the resource with invalid parameters
# 411100 User are frozen -- User are frozen, please contact us via support center.
# 500000 Internal Server Error -- We had a problem with our server. Try again later.
class KucoinAPIException(Exception):
"""Exception class to handle general API Exceptions
`code` values
`message` format
"""
def __init__(self, response):
self.code = ''
self.message = 'Unknown Error'
try:
json_res = response.json()
except ValueError:
self.message = response.content
else:
if 'error' in json_res:
self.message = json_res['error']
if 'msg' in json_res:
self.message = json_res['msg']
if 'message' in json_res and json_res['message'] != 'No message available':
self.message += ' - {}'.format(json_res['message'])
if 'code' in json_res:
self.code = json_res['code']
if 'data' in json_res:
try:
self.message += " " + json.dumps(json_res['data'])
except ValueError:
pass
self.status_code = response.status_code
self.response = response
self.request = getattr(response, 'request', None)
def __str__(self): # pragma: no cover
return 'KucoinAPIException {}: {}'.format(self.code, self.message)
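# --- Hedged usage sketch (added; not part of python-kucoin) ---
# Shows how KucoinAPIException assembles its message from a failed HTTP
# response. _FakeResponse below is invented purely for this illustration.
class _FakeResponse(object):
    status_code = 400
    def json(self):
        return {'code': '400100', 'msg': 'Parameter Error'}
def _demo_api_exception():
    exc = KucoinAPIException(_FakeResponse())
    # str(exc) -> 'KucoinAPIException 400100: Parameter Error'
    return str(exc)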
class KucoinRequestException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return 'KucoinRequestException: {}'.format(self.message)
class MarketOrderException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return 'MarketOrderException: {}'.format(self.message)
class LimitOrderException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
        return 'LimitOrderException: {}'.format(self.message)
| 30.608108
| 87
| 0.617219
|
d853120e4fa42fef3c8bf617cb14b911a05406f8
| 4,258
|
py
|
Python
|
btclib/electrum.py
|
giubby84/btclib
|
0dd7e4e8ca43451a03b577fd7ec95715a1a21711
|
[
"MIT"
] | null | null | null |
btclib/electrum.py
|
giubby84/btclib
|
0dd7e4e8ca43451a03b577fd7ec95715a1a21711
|
[
"MIT"
] | null | null | null |
btclib/electrum.py
|
giubby84/btclib
|
0dd7e4e8ca43451a03b577fd7ec95715a1a21711
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"""Electrum entropy / mnemonic / seed functions.
Electrum mnemonic is versioned, conveying BIP32 derivation rule too.
"""
import hmac
from hashlib import pbkdf2_hmac, sha512
from typing import Tuple
from .entropy import (
BinStr,
Entropy,
_entropy_from_indexes,
_indexes_from_entropy,
binstr_from_entropy,
)
from .mnemonic import (
Mnemonic,
_indexes_from_mnemonic,
_mnemonic_from_indexes,
_wordlists,
)
_MNEMONIC_VERSIONS = {
"standard": "01", # P2PKH and P2MS-P2SH wallets
"segwit": "100", # P2WPKH and P2WSH wallets
"2fa": "101", # Two-factor authenticated wallets
"2fa_segwit": "102", # Two-factor authenticated wallets, using segwit
}
def version_from_mnemonic(mnemonic: Mnemonic) -> Tuple[str, str]:
"""Return the (Electrum version, clean mnemonic) tuple.
The clean mnemonic is free from spurious whitespace characters
(extra spaces, tab, newline, return, formfeed, etc.)
"""
# split remove spurious whitespaces
mnemonic = " ".join(mnemonic.split())
s = hmac.new(b"Seed version", mnemonic.encode(), sha512).hexdigest()
if s.startswith(_MNEMONIC_VERSIONS["standard"]):
return "standard", mnemonic
if s.startswith(_MNEMONIC_VERSIONS["segwit"]):
return "segwit", mnemonic
if s.startswith(_MNEMONIC_VERSIONS["2fa"]):
return "2fa", mnemonic
if s.startswith(_MNEMONIC_VERSIONS["2fa_segwit"]):
return "2fa_segwit", mnemonic
m = f"unknown electrum mnemonic version: '{s[:3]}'; "
m += f"not in {list(_MNEMONIC_VERSIONS.keys())}"
raise ValueError(m)
def mnemonic_from_entropy(
entropy: Entropy, version_str: str = "standard", lang: str = "en"
) -> Mnemonic:
"""Convert input entropy to Electrum versioned mnemonic sentence.
Input entropy can be expressed as
binary 0/1 string, bytes-like, or integer.
In the case of binary 0/1 string and bytes-like,
leading zeros are considered redundant padding.
"""
if version_str not in _MNEMONIC_VERSIONS:
m = f"unknown electrum mnemonic version: '{version_str}'; "
m += f"not in {list(_MNEMONIC_VERSIONS.keys())}"
raise ValueError(m)
version = _MNEMONIC_VERSIONS[version_str]
binstr_entropy = binstr_from_entropy(entropy)
int_entropy = int(binstr_entropy, 2)
base = _wordlists.language_length(lang)
invalid = True
while invalid:
# electrum considers entropy as integer, losing any leading zero
# so the value of binstr_entropy before the while must be updated
nbits = int_entropy.bit_length()
binstr_entropy = binstr_from_entropy(int_entropy, nbits)
indexes = _indexes_from_entropy(binstr_entropy, base)
mnemonic = _mnemonic_from_indexes(indexes, lang)
# version validity check
s = hmac.new(b"Seed version", mnemonic.encode(), sha512).hexdigest()
if s.startswith(version):
invalid = False
# next trial
int_entropy += 1
return mnemonic
def entropy_from_mnemonic(mnemonic: Mnemonic, lang: str = "en") -> BinStr:
"Return the entropy from the Electrum versioned mnemonic sentence."
# verify that it is a valid Electrum mnemonic sentence
version_from_mnemonic(mnemonic)
indexes = _indexes_from_mnemonic(mnemonic, lang)
base = _wordlists.language_length(lang)
return _entropy_from_indexes(indexes, base)
def _seed_from_mnemonic(mnemonic: Mnemonic, passphrase: str) -> Tuple[str, bytes]:
"Return (version, seed) from the provided Electrum mnemonic."
# clean up mnemonic from spurious whitespaces
version, mnemonic = version_from_mnemonic(mnemonic)
hf_name = "sha512"
password = mnemonic.encode()
salt = ("electrum" + passphrase).encode()
iterations = 2048
dksize = 64
return version, pbkdf2_hmac(hf_name, password, salt, iterations, dksize)
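# --- Hedged usage sketch (added; not part of btclib) ---
# Round-trips toy entropy through a versioned mnemonic and derives the seed.
# mnemonic_from_entropy may increment the entropy until the HMAC version check
# passes, so the recovered entropy can differ slightly from the input.
def _demo_electrum_roundtrip() -> None:
    toy_entropy = "1" * 132  # 132-bit toy entropy as a binary 0/1 string
    mnemonic = mnemonic_from_entropy(toy_entropy, "standard", "en")
    version, _ = version_from_mnemonic(mnemonic)
    assert version == "standard"
    _, seed = _seed_from_mnemonic(mnemonic, "")
    assert len(seed) == 64  # pbkdf2_hmac with dksize 64 returns a 64-byte seed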
| 33.007752
| 82
| 0.701738
|
3f2fb14e8e893774b003e34adcac03b390a8ca48
| 4,881
|
py
|
Python
|
modules/face_detection/lib/common/face.py
|
TestLEM/MagicMirror
|
45bbe9de71e65e68c65684200c07138777e3aed4
|
[
"MIT"
] | null | null | null |
modules/face_detection/lib/common/face.py
|
TestLEM/MagicMirror
|
45bbe9de71e65e68c65684200c07138777e3aed4
|
[
"MIT"
] | 1
|
2018-05-14T06:45:31.000Z
|
2018-05-14T06:45:31.000Z
|
modules/face_detection/lib/common/face.py
|
TestLEM/MagicMirror
|
45bbe9de71e65e68c65684200c07138777e3aed4
|
[
"MIT"
] | null | null | null |
# coding: utf8
"""Face Detection - MagicMirror Module
The MIT License (MIT)
Based on work by Paul-Vincent Roll (Copyright 2016) (MIT License)
Reference link - https://github.com/normyx/MMM-Facial-Recognition-OCV3
"""
import cv2
import sys
class FaceDetection:
'Face Detection Class'
def __init__(self,
haar_scale_factor,
haar_min_neighbors_face,
haar_min_size_face,
haar_faces_file=None,
haar_min_neighbors_eyes=None,
haar_min_size_eyes=None,
haar_eyes_file=None) :
self.haar_scale_factor = haar_scale_factor
self.haar_min_neighbors_face = haar_min_neighbors_face
self.haar_min_size_face = haar_min_size_face
        self.haar_faces_file = haar_faces_file
self.haar_min_neighbors_eyes = haar_min_neighbors_eyes
self.haar_min_size_eyes = haar_min_size_eyes
self.haar_eyes_file = haar_eyes_file
if haar_faces_file is not None : self.haar_faces = cv2.CascadeClassifier(haar_faces_file)
if haar_eyes_file is not None : self.haar_eyes = cv2.CascadeClassifier(haar_eyes_file)
def detect_single(self, image):
"""Return bounds (x, y, width, height) of detected face in grayscale image.
If no face or more than one face are detected, None is returned.
"""
faces = self.haar_faces.detectMultiScale(image,
scaleFactor=self.haar_scale_factor,
minNeighbors=self.haar_min_neighbors_face,
minSize=self.haar_min_size_face,
flags=cv2.CASCADE_SCALE_IMAGE)
if len(faces) != 1:
return None
return faces[0]
def detect_faces(self, image):
"""Return bounds (x, y, width, height) of detected face in grayscale image.
return all faces found in the image
"""
faces = self.haar_faces.detectMultiScale(image,
scaleFactor=self.haar_scale_factor,
minNeighbors=self.haar_min_neighbors_face,
minSize=self.haar_min_size_face,
flags=cv2.CASCADE_SCALE_IMAGE)
if len(faces) == 0:
return None
return faces
def detect_eyes(self, image):
eyes = self.haar_eyes.detectMultiScale(image,
scaleFactor=self.haar_scale_factor,
minNeighbors=self.haar_min_neighbors_eyes,
minSize=self.haar_min_size_eyes,
flags=cv2.CASCADE_SCALE_IMAGE)
return eyes
def eyes_to_face(self, eyes):
"""Return bounds (x, y, width, height) of estimated face location based
on the location of a pair of eyes.
TODO: Sort through multiple eyes (> 2) to find pairs and detect multiple
faces.
"""
if (len(eyes) != 2):
print("Don't know what to do with {0} eye(s).".format(len(eyes)))
for eye in eyes:
print('{0:4d} {1:4d} {2:3d} {3:3d}'
.format(eye[0], eye[1], eye[2], eye[3]))
return None
x0, y0, w0, h0 = eyes[0]
x1, y1, w1, h1 = eyes[1]
# compute centered coordinates for the eyes and face
cx0 = x0 + int(0.5*w0)
cx1 = x1 + int(0.5*w1)
cy0 = y0 + int(0.5*h0)
cy1 = y1 + int(0.5*h1)
left_cx = min(cx0, cx1)
right_cx = max(cx0, cx1)
x_face_center = int((left_cx + right_cx)/2)
y_face_center = int((cy0 + cy1)/2)
eye_width = right_cx - left_cx
# eye_width is about 2/5 the total face width
# and 2/6 the total height
w = int(5 * eye_width / 2)
h = int(3 * eye_width)
x = max(0, x_face_center - int(1.25 * eye_width))
y = max(0, y_face_center - int(1.5 * eye_width))
return [[x, y, w, h]]
def crop(self, image, x, y, w, h, crop_height):
"""Crop box defined by x, y (upper left corner) and w, h (width and height)
to an image with the same aspect ratio as the face training data. Might
return a smaller crop if the box is near the edge of the image.
"""
midy = y + h / 2
y1 = int(max(0, midy - crop_height / 2))
y2 = int(min(image.shape[0] - 1, midy + crop_height / 2))
return image[y1:y2, x:x + w]
def resize(self, image, face_width, face_height):
"""Resize a face image to the proper size for training and detection.
"""
return cv2.resize(image, (face_width, face_height), interpolation=cv2.INTER_LANCZOS4)
| 42.077586
| 97
| 0.563204
|
db3569b6a400d6d819528d299bef52118e849072
| 1,548
|
py
|
Python
|
pypy/module/pypyjit/test_pypy_c/test_thread.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2016-07-06T23:30:20.000Z
|
2017-05-30T15:59:31.000Z
|
pypy/module/pypyjit/test_pypy_c/test_thread.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | null | null | null |
pypy/module/pypyjit/test_pypy_c/test_thread.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2020-07-09T08:14:22.000Z
|
2021-01-15T18:01:25.000Z
|
from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
class TestThread(BaseTestPyPyC):
def test_simple(self):
def main(n):
import thread
def f():
i = 0
while i < n:
i += 1
done.release()
done = thread.allocate_lock()
done.acquire()
thread.start_new_thread(f, ())
done.acquire()
return 0
log = self.run(main, [500])
assert round(log.result, 6) == round(main(500), 6)
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i2 = int_lt(i0, i1)
guard_true(i2, descr=...)
i3 = int_add(i0, 1)
--THREAD-TICK--
jump(..., descr=...)
""")
def test_tls(self):
def main(n):
import thread
local = thread._local()
local.x = 1
i = 0
while i < n:
i += local.x
return 0
log = self.run(main, [500])
assert round(log.result, 6) == round(main(500), 6)
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i53 = int_lt(i48, i27)
guard_true(i53, descr=...)
i54 = int_add_ovf(i48, i47)
guard_no_overflow(descr=...)
--TICK--
i58 = arraylen_gc(p43, descr=...)
jump(p0, p1, p3, p5, p10, p12, p14, i54, i27, i47, p45, p43, descr=...)
""")
| 30.352941
| 83
| 0.46447
|
b73bf83dff0169b1857156ab42c8351a1397a486
| 4,652
|
py
|
Python
|
skhep/math/kinematics.py
|
AdvaitDhingra/scikit-hep
|
1e08901447c3e42e4c4b703ea6c09e3bd0db25af
|
[
"BSD-3-Clause"
] | 150
|
2016-11-14T14:09:29.000Z
|
2022-03-18T16:37:03.000Z
|
skhep/math/kinematics.py
|
AdvaitDhingra/scikit-hep
|
1e08901447c3e42e4c4b703ea6c09e3bd0db25af
|
[
"BSD-3-Clause"
] | 123
|
2017-01-30T10:03:04.000Z
|
2022-03-31T06:26:09.000Z
|
skhep/math/kinematics.py
|
AdvaitDhingra/scikit-hep
|
1e08901447c3e42e4c4b703ea6c09e3bd0db25af
|
[
"BSD-3-Clause"
] | 41
|
2017-01-11T11:42:56.000Z
|
2021-12-06T22:38:32.000Z
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Mathematical functions relevant to kinematics
=============================================
"""
# -----------------------------------------------------------------------------
# Import statements
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from hepunits.units import MeV, ns
from hepunits.constants import hbar
from .numeric import isequal
def Kallen_function(x, y, z):
"""
The Kallen function, aka triangle or lambda function,
named after physicist Anders Olof Gunnar Kallen [Kallen]_:
.. math::
\\begin{eqnarray}
\\lambda(x,y,z) &=& x^2 + y^2 + z^2 - 2 x y - 2 y z - 2 z x \\\\
&=& (x-y-z)^2 - 4 y z \\\\
&=& [ x - (\\sqrt{y}+\\sqrt{z})^2 ] [ x - (\\sqrt{y}-\\sqrt{z})^2 ] \\,\\,\\,\\mathrm{if} \\,\\,\\,y, z > 0
\\end{eqnarray}
Example
-------
Calculate in the rest frame of a particle of mass M decaying to 2 particles labeled 1 and 2,
:math:`P (M) \\to p1 (m1) + p2 (m2)`, the momenta of 1 and 2 given by :math:`p = |\\mathbf{p1}| = |\\mathbf{p2}|`:
>>> from skhep.math import Kallen_function
>>> from hepunits.units import MeV, GeV
>>> from math import sqrt
>>> M = 5.279 * GeV; m1 = 493.7 * MeV; m2 = 139.6 * MeV
>>> p = sqrt( Kallen_function( M**2, m1**2, m2**2 ) ) / (2*M)
>>> print p / GeV # print the CMS momentum in GeV
2.61453580221
References
----------
.. [Kallen] https://en.wikipedia.org/wiki/K%C3%A4ll%C3%A9n_function
"""
return (x - y - z) ** 2 - 4 * y * z # faster to evaluate condensed form used
def Armenteros_Podolanski_variables(pplus_3Dvec, pminus_3Dvec):
"""
Calculate the Armenteros Podolanski [APPaper]_ variables :math:`(\\alpha,q_T)` for a 2-body decay.
Definition
----------
.. math::
\\alpha = \\frac{p_L^+ - p_L^-}{p_L^+ + p_L^-}
q_T = \\frac{| p^- \\times p^{\\mathrm mother}|}{|p^{\\mathrm mother}|}
where the longitudinal momentum along the direction of flight of the mother particle is
.. math::
p_L^\\pm = \\frac{p^\\pm \\cdot p^{\\mathrm mother}}{|p^{\\mathrm mother}|}
and :math:`q_T` is the transverse momentum of the daughter particles with respect
to the direction of flight of the mother particle.
These expressions can be simplified to
.. math::
\\alpha = \\frac{|p^+|^2 - |p^-|^2}{|p^+ + p^-|^2}
q_T = \\frac{| p^+ \\times p^- |}{|p^+ + p^-|}
Parameters
-----------
pplus_3Dvec : Vector3D
3D-momentum vector of the positively-charged daughter particle.
pminus_3Dvec : Vector3D
3D-momentum vector of the negatively-charged daughter particle.
Returns
-------
    Tuple :math:`(q_T, \\alpha)`, in the order returned by this function.
References
----------
.. [APPaper] J. Podolanski and R. Armenteros, III. Analysis of V-events, The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science 45 (1954) 13, http://dx.doi.org/10.1080/14786440108520416
"""
mother_mag = (pplus_3Dvec + pminus_3Dvec).mag
if isequal(mother_mag, 0.0):
raise ValueError("Total momentum has zero magnitude!")
# Longitudinal momentum asymmetry, i.e. imbalance
alpha = (pplus_3Dvec.mag2 - pminus_3Dvec.mag2) / mother_mag ** 2
# Transverse momentum of positively-charged particle along the mother particle momentum direction
qT = (pplus_3Dvec.cross(pminus_3Dvec)).mag / mother_mag
return (qT, alpha)
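# --- Hedged illustration (added; not part of scikit-hep) ---
# Re-derives the simplified (alpha, qT) expressions documented above with plain
# 3-element sequences, independently of the Vector3D class this module expects.
def _ap_variables_numpy(pplus, pminus):
    import numpy as np  # local import; numpy is not otherwise used in this module
    pplus, pminus = np.asarray(pplus, dtype=float), np.asarray(pminus, dtype=float)
    mother = pplus + pminus
    alpha = (np.dot(pplus, pplus) - np.dot(pminus, pminus)) / np.dot(mother, mother)
    q_t = np.linalg.norm(np.cross(pplus, pminus)) / np.linalg.norm(mother)
    return q_t, alpha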
def lifetime_to_width(tau):
"""Convert from a particle lifetime to a decay width.
Parameters
-----------
tau : float > 0
Particle lifetime, typically in picoseconds (any HEP time unit is OK).
Returns
-------
Particle decay width, in the HEP standard energy unit MeV.
"""
if tau <= 0:
raise ValueError("Input provided, %s <= 0!".format(tau))
# Just need to first make sure that the lifetime is in the standard unit ns
return hbar / float(tau / ns)
def width_to_lifetime(Gamma):
"""Convert from a particle decay width to a lifetime.
Parameters
----------
Gamma : float > 0
Particle decay width, typically in MeV (any HEP energy unit is OK).
Returns
-------
Particle lifetime, in the HEP standard time unit ns.
"""
if Gamma <= 0.0:
raise ValueError("Input provided, %s <= 0!".format(Gamma))
# Just need to first make sure that the width is in the standard unit MeV
return hbar / float(Gamma / MeV)
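# --- Hedged usage sketch (added; not part of scikit-hep) ---
# Round-trips a toy 1.5 ps lifetime through the two converters above. `ps` is
# assumed to be available from hepunits.units alongside the ns/MeV imported here.
def _demo_lifetime_width_roundtrip():
    from hepunits.units import ps
    gamma = lifetime_to_width(1.5 * ps)  # decay width, in MeV
    tau = width_to_lifetime(gamma)  # back to a lifetime, in ns
    return gamma, tau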
| 31.221477
| 214
| 0.577171
|
23fc7d0ab823421072cdd9518fa3a12ec0b8c149
| 715
|
py
|
Python
|
mysite/client/views.py
|
willem-vanheemstrasystems/django-frontend
|
01d9918577fb864b0d1c12d459328b6aac041002
|
[
"MIT"
] | null | null | null |
mysite/client/views.py
|
willem-vanheemstrasystems/django-frontend
|
01d9918577fb864b0d1c12d459328b6aac041002
|
[
"MIT"
] | null | null | null |
mysite/client/views.py
|
willem-vanheemstrasystems/django-frontend
|
01d9918577fb864b0d1c12d459328b6aac041002
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
# All Django wants returned is an HttpResponse. Or an exception.
def index(request):
# The code below loads the template called client/index.html
# and passes it a context.
# The context is a dictionary mapping template variable names to Python objects.
context = {}
# The render() function takes the request object as its first argument,
# a template name as its second argument
# and a dictionary as its optional third argument.
# It returns an HttpResponse object of the given template rendered with the given context.
return render(request, 'client/index.html', context)
| 42.058824
| 94
| 0.744056
|
a437761b3c790f80198068cdb15c0f56b2956207
| 2,268
|
py
|
Python
|
tempest/exceptions.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | null | null | null |
tempest/exceptions.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | null | null | null |
tempest/exceptions.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import exceptions
class BuildErrorException(exceptions.TempestException):
message = "Server %(server_id)s failed to build and is in ERROR status"
class SnapshotNotFoundException(exceptions.TempestException):
message = "Server snapshot image %(image_id)s not found."
class ImageKilledException(exceptions.TempestException):
message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"
class AddImageException(exceptions.TempestException):
message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
class VolumeResourceBuildErrorException(exceptions.TempestException):
message = ("%(resource_name)s %(resource_id)s failed to build and is in "
"ERROR status")
class VolumeRestoreErrorException(exceptions.TempestException):
message = "Volume %(volume_id)s failed to restore and is in ERROR status"
class StackBuildErrorException(exceptions.TempestException):
message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
"due to '%(stack_status_reason)s'")
class ServerUnreachable(exceptions.TempestException):
message = ("Server %(server_id)s is not reachable via "
"the configured network")
# NOTE(andreaf) This exception is added here to facilitate the migration
# of get_network_from_name and preprov_creds to tempest.lib, and it should
# be migrated along with them
class InvalidTestResource(exceptions.TempestException):
message = "%(name)s is not a valid %(type)s, or the name is ambiguous"
class RFCViolation(exceptions.RestClientException):
message = "RFC Violation"
| 35.4375
| 79
| 0.746473
|
39bc8903a08fd25a5772e278aa2856177dd94f0e
| 632
|
py
|
Python
|
src/odontology/person/migrations/0026_auto_20160228_0216.py
|
nanomolina/JP
|
248a47bced4dac850f85d28968ddf279cd123400
|
[
"Apache-2.0"
] | 2
|
2016-06-23T15:35:29.000Z
|
2022-01-11T00:55:21.000Z
|
src/odontology/person/migrations/0026_auto_20160228_0216.py
|
nanomolina/JP
|
248a47bced4dac850f85d28968ddf279cd123400
|
[
"Apache-2.0"
] | 27
|
2016-06-24T12:28:01.000Z
|
2022-01-13T00:37:25.000Z
|
src/odontology/person/migrations/0026_auto_20160228_0216.py
|
nanomolina/JP
|
248a47bced4dac850f85d28968ddf279cd123400
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-28 05:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('person', '0025_auto_20160227_1505'),
]
operations = [
migrations.AlterField(
model_name='dentist',
name='register_number',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='patient',
name='tel',
field=models.CharField(blank=True, max_length=250, null=True),
),
]
| 24.307692
| 74
| 0.599684
|
c6e41d2a0c3a4e9209c96ede09d530a7387f2061
| 8,693
|
py
|
Python
|
atom/nucleus/python/nucleus_api/models/page_audit_log.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/models/page_audit_log.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/models/page_audit_log.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.5
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from nucleus_api.configuration import Configuration
class PageAuditLog(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'content': 'list[AuditLog]',
'first': 'bool',
'last': 'bool',
'number': 'int',
'number_of_elements': 'int',
'size': 'int',
'sort': 'list[Sort]',
'total_elements': 'int',
'total_pages': 'int'
}
attribute_map = {
'content': 'content',
'first': 'first',
'last': 'last',
'number': 'number',
'number_of_elements': 'number_of_elements',
'size': 'size',
'sort': 'sort',
'total_elements': 'total_elements',
'total_pages': 'total_pages'
}
def __init__(self, content=None, first=None, last=None, number=None, number_of_elements=None, size=None, sort=None, total_elements=None, total_pages=None, _configuration=None): # noqa: E501
"""PageAuditLog - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._content = None
self._first = None
self._last = None
self._number = None
self._number_of_elements = None
self._size = None
self._sort = None
self._total_elements = None
self._total_pages = None
self.discriminator = None
if content is not None:
self.content = content
if first is not None:
self.first = first
if last is not None:
self.last = last
if number is not None:
self.number = number
if number_of_elements is not None:
self.number_of_elements = number_of_elements
if size is not None:
self.size = size
if sort is not None:
self.sort = sort
if total_elements is not None:
self.total_elements = total_elements
if total_pages is not None:
self.total_pages = total_pages
@property
def content(self):
"""Gets the content of this PageAuditLog. # noqa: E501
:return: The content of this PageAuditLog. # noqa: E501
:rtype: list[AuditLog]
"""
return self._content
@content.setter
def content(self, content):
"""Sets the content of this PageAuditLog.
:param content: The content of this PageAuditLog. # noqa: E501
:type: list[AuditLog]
"""
self._content = content
@property
def first(self):
"""Gets the first of this PageAuditLog. # noqa: E501
:return: The first of this PageAuditLog. # noqa: E501
:rtype: bool
"""
return self._first
@first.setter
def first(self, first):
"""Sets the first of this PageAuditLog.
:param first: The first of this PageAuditLog. # noqa: E501
:type: bool
"""
self._first = first
@property
def last(self):
"""Gets the last of this PageAuditLog. # noqa: E501
:return: The last of this PageAuditLog. # noqa: E501
:rtype: bool
"""
return self._last
@last.setter
def last(self, last):
"""Sets the last of this PageAuditLog.
:param last: The last of this PageAuditLog. # noqa: E501
:type: bool
"""
self._last = last
@property
def number(self):
"""Gets the number of this PageAuditLog. # noqa: E501
:return: The number of this PageAuditLog. # noqa: E501
:rtype: int
"""
return self._number
@number.setter
def number(self, number):
"""Sets the number of this PageAuditLog.
:param number: The number of this PageAuditLog. # noqa: E501
:type: int
"""
self._number = number
@property
def number_of_elements(self):
"""Gets the number_of_elements of this PageAuditLog. # noqa: E501
:return: The number_of_elements of this PageAuditLog. # noqa: E501
:rtype: int
"""
return self._number_of_elements
@number_of_elements.setter
def number_of_elements(self, number_of_elements):
"""Sets the number_of_elements of this PageAuditLog.
:param number_of_elements: The number_of_elements of this PageAuditLog. # noqa: E501
:type: int
"""
self._number_of_elements = number_of_elements
@property
def size(self):
"""Gets the size of this PageAuditLog. # noqa: E501
:return: The size of this PageAuditLog. # noqa: E501
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this PageAuditLog.
:param size: The size of this PageAuditLog. # noqa: E501
:type: int
"""
self._size = size
@property
def sort(self):
"""Gets the sort of this PageAuditLog. # noqa: E501
:return: The sort of this PageAuditLog. # noqa: E501
:rtype: list[Sort]
"""
return self._sort
@sort.setter
def sort(self, sort):
"""Sets the sort of this PageAuditLog.
:param sort: The sort of this PageAuditLog. # noqa: E501
:type: list[Sort]
"""
self._sort = sort
@property
def total_elements(self):
"""Gets the total_elements of this PageAuditLog. # noqa: E501
:return: The total_elements of this PageAuditLog. # noqa: E501
:rtype: int
"""
return self._total_elements
@total_elements.setter
def total_elements(self, total_elements):
"""Sets the total_elements of this PageAuditLog.
:param total_elements: The total_elements of this PageAuditLog. # noqa: E501
:type: int
"""
self._total_elements = total_elements
@property
def total_pages(self):
"""Gets the total_pages of this PageAuditLog. # noqa: E501
:return: The total_pages of this PageAuditLog. # noqa: E501
:rtype: int
"""
return self._total_pages
@total_pages.setter
def total_pages(self, total_pages):
"""Sets the total_pages of this PageAuditLog.
:param total_pages: The total_pages of this PageAuditLog. # noqa: E501
:type: int
"""
self._total_pages = total_pages
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PageAuditLog, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PageAuditLog):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PageAuditLog):
return True
return self.to_dict() != other.to_dict()
| 26.183735
| 194
| 0.573105
|
c705c1a6771d1e6e244cdaf8366f687a2223de24
| 2,677
|
py
|
Python
|
src/compas_rhino/conduits/labels.py
|
XingxinHE/compas
|
d2901dbbacdaf4694e5adae78ba8f093f10532bf
|
[
"MIT"
] | null | null | null |
src/compas_rhino/conduits/labels.py
|
XingxinHE/compas
|
d2901dbbacdaf4694e5adae78ba8f093f10532bf
|
[
"MIT"
] | null | null | null |
src/compas_rhino/conduits/labels.py
|
XingxinHE/compas
|
d2901dbbacdaf4694e5adae78ba8f093f10532bf
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from System.Drawing.Color import FromArgb
from Rhino.Geometry import Point3d
from compas_rhino.conduits.base import BaseConduit
from compas.utilities import color_to_rgb
__all__ = ['LabelsConduit']
class LabelsConduit(BaseConduit):
"""A Rhino display conduit for labels.
Parameters
----------
labels : list of 2-tuple
A list of label tuples.
Each tuple contains a position and text for the label.
color : list of 2-tuple, optional
The colors of the labels.
Each color is a tuple with a background color and a text color.
Default is ``((0, 0, 0), (255, 255, 255))`` for all labels.
Attributes
----------
color : list of RGB colors
A color specification per label.
labels : list
A list of label tuples.
Each tuple contains a position and text for the label.
Examples
--------
.. code-block:: python
from random import randint
from compas_rhino.conduits import LabelsConduit
labels = [([1.0 * randint(0, 100), 1.0 * randint(0, 100), 0.0], str(i)) for i in range(100)]
conduit = LabelsConduit(labels)
with conduit.enabled():
for i in range(100):
conduit.labels = [([1.0 * randint(0, 100), 1.0 * randint(0, 100), 0.0], str(i)) for i in range(100)]
conduit.redraw(pause=0.1)
"""
def __init__(self, labels, color=None, **kwargs):
super(LabelsConduit, self).__init__(**kwargs)
self._default_color = FromArgb(0, 0, 0)
self._default_textcolor = FromArgb(255, 255, 255)
self._color = None
self.labels = labels or []
self.color = color
@property
def color(self):
return self._colors
@color.setter
def color(self, color):
if color:
color[:] = [(FromArgb(* color_to_rgb(bgc)), FromArgb(* color_to_rgb(tc))) for bgc, tc in color]
l = len(self.labels) # noqa: E741
c = len(color)
if c < l:
color += [(self._default_color, self._default_textcolor) for i in range(l - c)]
elif c > l:
color[:] = color[:l]
self._color = color
def DrawForeground(self, e):
for i, (pos, text) in enumerate(self.labels):
if self.color:
color, textcolor = self.color[i]
e.Display.DrawDot(Point3d(*pos), text, color, textcolor)
else:
e.Display.DrawDot(Point3d(*pos), text, self._default_color, self._default_textcolor)
| 31.869048
| 116
| 0.598058
|
ae808d6d8dae482f65da07c13b4c6de824a23c55
| 38,361
|
py
|
Python
|
distributed/comm/tests/test_comms.py
|
aiudirog/distributed
|
d956ef6b7ae09634fe9307e9da1e2de1fc487e8d
|
[
"BSD-3-Clause"
] | null | null | null |
distributed/comm/tests/test_comms.py
|
aiudirog/distributed
|
d956ef6b7ae09634fe9307e9da1e2de1fc487e8d
|
[
"BSD-3-Clause"
] | null | null | null |
distributed/comm/tests/test_comms.py
|
aiudirog/distributed
|
d956ef6b7ae09634fe9307e9da1e2de1fc487e8d
|
[
"BSD-3-Clause"
] | null | null | null |
import asyncio
import os
import sys
import threading
import types
import warnings
from functools import partial
import pkg_resources
import pytest
from tornado import ioloop
from tornado.concurrent import Future
import dask
import distributed
from distributed.comm import (
CommClosedError,
asyncio_tcp,
connect,
get_address_host,
get_local_address_for,
inproc,
listen,
parse_address,
parse_host_port,
resolve_address,
unparse_host_port,
)
from distributed.comm.registry import backends, get_backend
from distributed.metrics import time
from distributed.protocol import Serialized, deserialize, serialize, to_serialize
from distributed.utils import get_ip, get_ipv6
from distributed.utils_test import (
get_cert,
get_client_ssl_context,
get_server_ssl_context,
has_ipv6,
requires_ipv6,
)
EXTERNAL_IP4 = get_ip()
if has_ipv6():
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
EXTERNAL_IP6 = get_ipv6()
@pytest.fixture(params=["tornado", "asyncio"])
def tcp(monkeypatch, request):
"""Set the TCP backend to either tornado or asyncio"""
if request.param == "tornado":
import distributed.comm.tcp as tcp
else:
import distributed.comm.asyncio_tcp as tcp
monkeypatch.setitem(backends, "tcp", tcp.TCPBackend())
monkeypatch.setitem(backends, "tls", tcp.TLSBackend())
return tcp
ca_file = get_cert("tls-ca-cert.pem")
# The Subject field of our test certs
cert_subject = (
(("countryName", "XY"),),
(("localityName", "Dask-distributed"),),
(("organizationName", "Dask"),),
(("commonName", "localhost"),),
)
def check_tls_extra(info):
assert isinstance(info, dict)
assert info["peercert"]["subject"] == cert_subject
assert "cipher" in info
cipher_name, proto_name, secret_bits = info["cipher"]
# Most likely
assert "AES" in cipher_name
assert "TLS" in proto_name
assert secret_bits >= 128
tls_kwargs = dict(
listen_args={"ssl_context": get_server_ssl_context()},
connect_args={"ssl_context": get_client_ssl_context()},
)
@pytest.mark.asyncio
async def get_comm_pair(listen_addr, listen_args={}, connect_args={}, **kwargs):
q = asyncio.Queue()
async def handle_comm(comm):
await q.put(comm)
listener = await listen(listen_addr, handle_comm, **listen_args, **kwargs)
comm = await connect(listener.contact_address, **connect_args, **kwargs)
serv_comm = await q.get()
return (comm, serv_comm)
def get_tcp_comm_pair(**kwargs):
return get_comm_pair("tcp://", **kwargs)
def get_tls_comm_pair(**kwargs):
kwargs.update(tls_kwargs)
return get_comm_pair("tls://", **kwargs)
def get_inproc_comm_pair(**kwargs):
return get_comm_pair("inproc://", **kwargs)
async def debug_loop():
"""
Debug helper
"""
while True:
loop = ioloop.IOLoop.current()
print(".", loop, loop._handlers)
await asyncio.sleep(0.5)
#
# Test utility functions
#
def test_parse_host_port(tcp):
f = parse_host_port
assert f("localhost:123") == ("localhost", 123)
assert f("127.0.0.1:456") == ("127.0.0.1", 456)
assert f("localhost:123", 80) == ("localhost", 123)
assert f("localhost", 80) == ("localhost", 80)
with pytest.raises(ValueError):
f("localhost")
assert f("[::1]:123") == ("::1", 123)
assert f("[fe80::1]:123", 80) == ("fe80::1", 123)
assert f("[::1]", 80) == ("::1", 80)
with pytest.raises(ValueError):
f("[::1]")
with pytest.raises(ValueError):
f("::1:123")
with pytest.raises(ValueError):
f("::1")
def test_unparse_host_port(tcp):
f = unparse_host_port
assert f("localhost", 123) == "localhost:123"
assert f("127.0.0.1", 123) == "127.0.0.1:123"
assert f("::1", 123) == "[::1]:123"
assert f("[::1]", 123) == "[::1]:123"
assert f("127.0.0.1") == "127.0.0.1"
assert f("127.0.0.1", None) == "127.0.0.1"
assert f("127.0.0.1", "*") == "127.0.0.1:*"
assert f("::1") == "[::1]"
assert f("[::1]") == "[::1]"
assert f("::1", "*") == "[::1]:*"
def test_get_address_host(tcp):
f = get_address_host
assert f("tcp://127.0.0.1:123") == "127.0.0.1"
assert f("inproc://%s/%d/123" % (get_ip(), os.getpid())) == get_ip()
def test_resolve_address(tcp):
f = resolve_address
assert f("tcp://127.0.0.1:123") == "tcp://127.0.0.1:123"
assert f("127.0.0.2:789") == "tcp://127.0.0.2:789"
assert f("tcp://0.0.0.0:456") == "tcp://0.0.0.0:456"
assert f("tcp://0.0.0.0:456") == "tcp://0.0.0.0:456"
if has_ipv6():
assert f("tcp://[::1]:123") == "tcp://[::1]:123"
assert f("tls://[::1]:123") == "tls://[::1]:123"
# OS X returns '::0.0.0.2' as canonical representation
assert f("[::2]:789") in ("tcp://[::2]:789", "tcp://[::0.0.0.2]:789")
assert f("tcp://[::]:123") == "tcp://[::]:123"
assert f("localhost:123") == "tcp://127.0.0.1:123"
assert f("tcp://localhost:456") == "tcp://127.0.0.1:456"
assert f("tls://localhost:456") == "tls://127.0.0.1:456"
def test_get_local_address_for(tcp):
f = get_local_address_for
assert f("tcp://127.0.0.1:80") == "tcp://127.0.0.1"
assert f("tcp://8.8.8.8:4444") == "tcp://" + get_ip()
if has_ipv6():
assert f("tcp://[::1]:123") == "tcp://[::1]"
inproc_arg = "inproc://%s/%d/444" % (get_ip(), os.getpid())
inproc_res = f(inproc_arg)
assert inproc_res.startswith("inproc://")
assert inproc_res != inproc_arg
#
# Test concrete transport APIs
#
@pytest.mark.asyncio
async def test_tcp_listener_does_not_call_handler_on_handshake_error(tcp):
handle_comm_called = False
async def handle_comm(comm):
nonlocal handle_comm_called
handle_comm_called = True
with dask.config.set({"distributed.comm.timeouts.connect": 0.01}):
listener = await tcp.TCPListener("127.0.0.1", handle_comm)
host, port = listener.get_host_port()
# connect without handshake:
reader, writer = await asyncio.open_connection(host=host, port=port)
# wait a bit to let the listener side hit the timeout on the handshake:
await asyncio.sleep(0.02)
assert not handle_comm_called
writer.close()
await writer.wait_closed()
@pytest.mark.asyncio
async def test_tcp_specific(tcp):
"""
Test concrete TCP API.
"""
async def handle_comm(comm):
assert comm.peer_address.startswith("tcp://" + host)
assert comm.extra_info == {}
msg = await comm.read()
msg["op"] = "pong"
await comm.write(msg)
await comm.close()
listener = await tcp.TCPListener("127.0.0.1", handle_comm)
host, port = listener.get_host_port()
assert host in ("localhost", "127.0.0.1", "::1")
assert port > 0
l = []
async def client_communicate(key, delay=0):
addr = "%s:%d" % (host, port)
comm = await connect(listener.contact_address)
assert comm.peer_address == "tcp://" + addr
assert comm.extra_info == {}
await comm.write({"op": "ping", "data": key})
if delay:
await asyncio.sleep(delay)
msg = await comm.read()
assert msg == {"op": "pong", "data": key}
l.append(key)
await comm.close()
await client_communicate(key=1234)
# Many clients at once
N = 100
futures = [client_communicate(key=i, delay=0.05) for i in range(N)]
await asyncio.gather(*futures)
assert set(l) == {1234} | set(range(N))
@pytest.mark.asyncio
async def test_tls_specific(tcp):
"""
Test concrete TLS API.
"""
async def handle_comm(comm):
assert comm.peer_address.startswith("tls://" + host)
check_tls_extra(comm.extra_info)
msg = await comm.read()
msg["op"] = "pong"
await comm.write(msg)
await comm.close()
server_ctx = get_server_ssl_context()
client_ctx = get_client_ssl_context()
listener = await tcp.TLSListener("127.0.0.1", handle_comm, ssl_context=server_ctx)
host, port = listener.get_host_port()
assert host in ("localhost", "127.0.0.1", "::1")
assert port > 0
l = []
async def client_communicate(key, delay=0):
addr = "%s:%d" % (host, port)
comm = await connect(listener.contact_address, ssl_context=client_ctx)
assert comm.peer_address == "tls://" + addr
check_tls_extra(comm.extra_info)
await comm.write({"op": "ping", "data": key})
if delay:
await asyncio.sleep(delay)
msg = await comm.read()
assert msg == {"op": "pong", "data": key}
l.append(key)
await comm.close()
await client_communicate(key=1234)
# Many clients at once
N = 100
futures = [client_communicate(key=i, delay=0.05) for i in range(N)]
await asyncio.gather(*futures)
assert set(l) == {1234} | set(range(N))
@pytest.mark.asyncio
async def test_comm_failure_threading(tcp):
"""
When we fail to connect, make sure we don't make a lot
of threads.
    We only assert for PY3, because the thread limit is only set
    for Python 3. See the GitHub PR #2403 discussion for info.
"""
if tcp is asyncio_tcp:
pytest.skip("not applicable for asyncio")
async def sleep_for_60ms():
max_thread_count = 0
for x in range(60):
await asyncio.sleep(0.001)
thread_count = threading.active_count()
if thread_count > max_thread_count:
max_thread_count = thread_count
return max_thread_count
original_thread_count = threading.active_count()
# tcp.TCPConnector()
sleep_future = sleep_for_60ms()
with pytest.raises(IOError):
await connect("tcp://localhost:28400", 0.052)
max_thread_count = await sleep_future
# 2 is the number set by BaseTCPConnector.executor (ThreadPoolExecutor)
assert max_thread_count <= 2 + original_thread_count
# tcp.TLSConnector()
sleep_future = sleep_for_60ms()
with pytest.raises(IOError):
await connect(
"tls://localhost:28400", 0.052, ssl_context=get_client_ssl_context()
)
max_thread_count = await sleep_future
assert max_thread_count <= 2 + original_thread_count
async def check_inproc_specific(run_client):
"""
Test concrete InProc API.
"""
listener_addr = inproc.global_manager.new_address()
addr_head = listener_addr.rpartition("/")[0]
client_addresses = set()
N_MSGS = 3
async def handle_comm(comm):
assert comm.peer_address.startswith("inproc://" + addr_head)
client_addresses.add(comm.peer_address)
for i in range(N_MSGS):
msg = await comm.read()
msg["op"] = "pong"
await comm.write(msg)
await comm.close()
listener = await inproc.InProcListener(listener_addr, handle_comm)
assert (
listener.listen_address
== listener.contact_address
== "inproc://" + listener_addr
)
l = []
async def client_communicate(key, delay=0):
comm = await connect(listener.contact_address)
assert comm.peer_address == "inproc://" + listener_addr
for i in range(N_MSGS):
await comm.write({"op": "ping", "data": key})
if delay:
await asyncio.sleep(delay)
msg = await comm.read()
assert msg == {"op": "pong", "data": key}
l.append(key)
with pytest.raises(CommClosedError):
await comm.read()
await comm.close()
client_communicate = partial(run_client, client_communicate)
await client_communicate(key=1234)
# Many clients at once
N = 20
futures = [client_communicate(key=i, delay=0.001) for i in range(N)]
await asyncio.gather(*futures)
assert set(l) == {1234} | set(range(N))
assert len(client_addresses) == N + 1
assert listener.contact_address not in client_addresses
def run_coro(func, *args, **kwargs):
return func(*args, **kwargs)
def run_coro_in_thread(func, *args, **kwargs):
fut = Future()
main_loop = ioloop.IOLoop.current()
def run():
thread_loop = ioloop.IOLoop() # need fresh IO loop for run_sync()
try:
res = thread_loop.run_sync(partial(func, *args, **kwargs), timeout=10)
except Exception:
main_loop.add_callback(fut.set_exc_info, sys.exc_info())
else:
main_loop.add_callback(fut.set_result, res)
finally:
thread_loop.close()
t = threading.Thread(target=run)
t.start()
return fut
@pytest.mark.asyncio
async def test_inproc_specific_same_thread():
await check_inproc_specific(run_coro)
@pytest.mark.asyncio
async def test_inproc_specific_different_threads():
await check_inproc_specific(run_coro_in_thread)
#
# Test communications through the abstract API
#
async def check_client_server(
addr,
check_listen_addr=None,
check_contact_addr=None,
listen_args={},
connect_args={},
):
"""
Abstract client / server test.
"""
async def handle_comm(comm):
scheme, loc = parse_address(comm.peer_address)
assert scheme == bound_scheme
msg = await comm.read()
assert msg["op"] == "ping"
msg["op"] = "pong"
await comm.write(msg)
msg = await comm.read()
assert msg["op"] == "foobar"
await comm.close()
# Arbitrary connection args should be ignored
listen_args = listen_args or {"xxx": "bar"}
connect_args = connect_args or {"xxx": "foo"}
listener = await listen(addr, handle_comm, **listen_args)
# Check listener properties
bound_addr = listener.listen_address
bound_scheme, bound_loc = parse_address(bound_addr)
assert bound_scheme in backends
assert bound_scheme == parse_address(addr)[0]
if check_listen_addr is not None:
check_listen_addr(bound_loc)
contact_addr = listener.contact_address
contact_scheme, contact_loc = parse_address(contact_addr)
assert contact_scheme == bound_scheme
if check_contact_addr is not None:
check_contact_addr(contact_loc)
else:
assert contact_addr == bound_addr
# Check client <-> server comms
l = []
async def client_communicate(key, delay=0):
comm = await connect(listener.contact_address, **connect_args)
assert comm.peer_address == listener.contact_address
await comm.write({"op": "ping", "data": key})
await comm.write({"op": "foobar"})
if delay:
await asyncio.sleep(delay)
msg = await comm.read()
assert msg == {"op": "pong", "data": key}
l.append(key)
await comm.close()
await client_communicate(key=1234)
# Many clients at once
futures = [client_communicate(key=i, delay=0.05) for i in range(20)]
await asyncio.gather(*futures)
assert set(l) == {1234} | set(range(20))
listener.stop()
@pytest.mark.gpu
@pytest.mark.asyncio
async def test_ucx_client_server():
pytest.importorskip("distributed.comm.ucx")
ucp = pytest.importorskip("ucp")
addr = ucp.get_address()
await check_client_server("ucx://" + addr)
def tcp_eq(expected_host, expected_port=None):
def checker(loc):
host, port = parse_host_port(loc)
assert host == expected_host
if expected_port is not None:
assert port == expected_port
else:
assert 1023 < port < 65536
return checker
tls_eq = tcp_eq
def inproc_check():
expected_ip = get_ip()
expected_pid = os.getpid()
def checker(loc):
ip, pid, suffix = loc.split("/")
assert ip == expected_ip
assert int(pid) == expected_pid
return checker
@pytest.mark.asyncio
async def test_default_client_server_ipv4(tcp):
# Default scheme is (currently) TCP
await check_client_server("127.0.0.1", tcp_eq("127.0.0.1"))
await check_client_server("127.0.0.1:3201", tcp_eq("127.0.0.1", 3201))
await check_client_server("0.0.0.0", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4))
await check_client_server(
"0.0.0.0:3202", tcp_eq("0.0.0.0", 3202), tcp_eq(EXTERNAL_IP4, 3202)
)
# IPv4 is preferred for the bound address
await check_client_server("", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4))
await check_client_server(
":3203", tcp_eq("0.0.0.0", 3203), tcp_eq(EXTERNAL_IP4, 3203)
)
@requires_ipv6
@pytest.mark.asyncio
async def test_default_client_server_ipv6(tcp):
await check_client_server("[::1]", tcp_eq("::1"))
await check_client_server("[::1]:3211", tcp_eq("::1", 3211))
await check_client_server("[::]", tcp_eq("::"), tcp_eq(EXTERNAL_IP6))
await check_client_server(
"[::]:3212", tcp_eq("::", 3212), tcp_eq(EXTERNAL_IP6, 3212)
)
@pytest.mark.asyncio
async def test_tcp_client_server_ipv4(tcp):
await check_client_server("tcp://127.0.0.1", tcp_eq("127.0.0.1"))
await check_client_server("tcp://127.0.0.1:3221", tcp_eq("127.0.0.1", 3221))
await check_client_server("tcp://0.0.0.0", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4))
await check_client_server(
"tcp://0.0.0.0:3222", tcp_eq("0.0.0.0", 3222), tcp_eq(EXTERNAL_IP4, 3222)
)
await check_client_server("tcp://", tcp_eq("0.0.0.0"), tcp_eq(EXTERNAL_IP4))
await check_client_server(
"tcp://:3223", tcp_eq("0.0.0.0", 3223), tcp_eq(EXTERNAL_IP4, 3223)
)
@requires_ipv6
@pytest.mark.asyncio
async def test_tcp_client_server_ipv6(tcp):
await check_client_server("tcp://[::1]", tcp_eq("::1"))
await check_client_server("tcp://[::1]:3231", tcp_eq("::1", 3231))
await check_client_server("tcp://[::]", tcp_eq("::"), tcp_eq(EXTERNAL_IP6))
await check_client_server(
"tcp://[::]:3232", tcp_eq("::", 3232), tcp_eq(EXTERNAL_IP6, 3232)
)
@pytest.mark.asyncio
async def test_tls_client_server_ipv4(tcp):
await check_client_server("tls://127.0.0.1", tls_eq("127.0.0.1"), **tls_kwargs)
await check_client_server(
"tls://127.0.0.1:3221", tls_eq("127.0.0.1", 3221), **tls_kwargs
)
await check_client_server(
"tls://", tls_eq("0.0.0.0"), tls_eq(EXTERNAL_IP4), **tls_kwargs
)
@requires_ipv6
@pytest.mark.asyncio
async def test_tls_client_server_ipv6(tcp):
await check_client_server("tls://[::1]", tls_eq("::1"), **tls_kwargs)
@pytest.mark.asyncio
async def test_inproc_client_server():
await check_client_server("inproc://", inproc_check())
await check_client_server(inproc.new_address(), inproc_check())
#
# TLS certificate handling
#
@pytest.mark.asyncio
async def test_tls_reject_certificate(tcp):
cli_ctx = get_client_ssl_context()
serv_ctx = get_server_ssl_context()
# These certs are not signed by our test CA
bad_cert_key = ("tls-self-signed-cert.pem", "tls-self-signed-key.pem")
bad_cli_ctx = get_client_ssl_context(*bad_cert_key)
bad_serv_ctx = get_server_ssl_context(*bad_cert_key)
async def handle_comm(comm):
scheme, loc = parse_address(comm.peer_address)
assert scheme == "tls"
await comm.close()
# Listener refuses a connector not signed by the CA
listener = await listen("tls://", handle_comm, ssl_context=serv_ctx)
with pytest.raises(EnvironmentError) as excinfo:
comm = await connect(
listener.contact_address, timeout=0.5, ssl_context=bad_cli_ctx
)
await comm.write({"x": "foo"}) # TODO: why is this necessary in Tornado 6 ?
if os.name != "nt":
try:
# See https://serverfault.com/questions/793260/what-does-tlsv1-alert-unknown-ca-mean
# assert "unknown ca" in str(excinfo.value)
pass
except AssertionError:
if os.name == "nt":
assert "An existing connection was forcibly closed" in str(
excinfo.value
)
else:
raise
# Sanity check
comm = await connect(listener.contact_address, timeout=2, ssl_context=cli_ctx)
await comm.close()
# Connector refuses a listener not signed by the CA
listener = await listen("tls://", handle_comm, ssl_context=bad_serv_ctx)
with pytest.raises(EnvironmentError) as excinfo:
await connect(listener.contact_address, timeout=2, ssl_context=cli_ctx)
# XXX: For asyncio this is just a timeout error
# assert "certificate verify failed" in str(excinfo.value.__cause__)
#
# Test communication closing
#
async def check_comm_closed_implicit(addr, delay=None, listen_args={}, connect_args={}):
async def handle_comm(comm):
await comm.close()
listener = await listen(addr, handle_comm, **listen_args)
comm = await connect(listener.contact_address, **connect_args)
with pytest.raises(CommClosedError):
await comm.write({})
await comm.read()
comm = await connect(listener.contact_address, **connect_args)
with pytest.raises(CommClosedError):
await comm.read()
@pytest.mark.asyncio
async def test_tcp_comm_closed_implicit(tcp):
await check_comm_closed_implicit("tcp://127.0.0.1")
@pytest.mark.asyncio
async def test_tls_comm_closed_implicit(tcp):
await check_comm_closed_implicit("tls://127.0.0.1", **tls_kwargs)
@pytest.mark.asyncio
async def test_inproc_comm_closed_implicit():
await check_comm_closed_implicit(inproc.new_address())
async def check_comm_closed_explicit(addr, listen_args={}, connect_args={}):
a, b = await get_comm_pair(addr, listen_args=listen_args, connect_args=connect_args)
a_read = a.read()
b_read = b.read()
await a.close()
# In-flight reads should abort with CommClosedError
with pytest.raises(CommClosedError):
await a_read
with pytest.raises(CommClosedError):
await b_read
# New reads as well
with pytest.raises(CommClosedError):
await a.read()
with pytest.raises(CommClosedError):
await b.read()
# And writes
with pytest.raises(CommClosedError):
await a.write({})
with pytest.raises(CommClosedError):
await b.write({})
await b.close()
@pytest.mark.asyncio
async def test_tcp_comm_closed_explicit(tcp):
await check_comm_closed_explicit("tcp://127.0.0.1")
@pytest.mark.xfail(
sys.version_info[:2] == (3, 7),
reason="This test fails on python 3.7 with certain versions of openssl",
)
@pytest.mark.asyncio
async def test_tls_comm_closed_explicit(tcp):
await check_comm_closed_explicit("tls://127.0.0.1", **tls_kwargs)
@pytest.mark.asyncio
async def test_inproc_comm_closed_explicit():
await check_comm_closed_explicit(inproc.new_address())
@pytest.mark.asyncio
async def test_inproc_comm_closed_explicit_2():
listener_errors = []
async def handle_comm(comm):
# Wait
try:
await comm.read()
except CommClosedError:
assert comm.closed()
listener_errors.append(True)
else:
await comm.close()
listener = await listen("inproc://", handle_comm)
comm = await connect(listener.contact_address)
await comm.close()
assert comm.closed()
start = time()
while len(listener_errors) < 1:
assert time() < start + 1
await asyncio.sleep(0.01)
assert len(listener_errors) == 1
with pytest.raises(CommClosedError):
await comm.read()
with pytest.raises(CommClosedError):
await comm.write("foo")
comm = await connect(listener.contact_address)
await comm.write("foo")
with pytest.raises(CommClosedError):
await comm.read()
with pytest.raises(CommClosedError):
await comm.write("foo")
assert comm.closed()
comm = await connect(listener.contact_address)
await comm.write("foo")
start = time()
while not comm.closed():
await asyncio.sleep(0.01)
assert time() < start + 2
await comm.close()
await comm.close()
@pytest.mark.asyncio
async def test_comm_closed_on_buffer_error(tcp):
# Internal errors from comm.stream.write, such as
# BufferError should lead to the stream being closed
# and not re-used. See GitHub #4133
if tcp is asyncio_tcp:
pytest.skip("Not applicable for asyncio")
reader, writer = await get_tcp_comm_pair()
def _write(data):
raise BufferError
writer.stream.write = _write
with pytest.raises(BufferError):
await writer.write("x")
assert writer.stream is None
await reader.close()
await writer.close()
#
# Various stress tests
#
async def echo(comm):
message = await comm.read()
await comm.write(message)
@pytest.mark.asyncio
async def test_retry_connect(tcp, monkeypatch):
async def echo(comm):
message = await comm.read()
await comm.write(message)
class UnreliableConnector(tcp.TCPConnector):
def __init__(self):
self.num_failures = 2
self.failures = 0
super().__init__()
async def connect(self, address, deserialize=True, **connection_args):
if self.failures > self.num_failures:
return await super().connect(address, deserialize, **connection_args)
else:
self.failures += 1
raise OSError()
class UnreliableBackend(tcp.TCPBackend):
_connector_class = UnreliableConnector
monkeypatch.setitem(backends, "tcp", UnreliableBackend())
listener = await listen("tcp://127.0.0.1:1234", echo)
try:
comm = await connect(listener.contact_address)
await comm.write(b"test")
msg = await comm.read()
assert msg == b"test"
finally:
listener.stop()
@pytest.mark.asyncio
async def test_handshake_slow_comm(tcp, monkeypatch):
class SlowComm(tcp.TCP):
def __init__(self, *args, delay_in_comm=0.5, **kwargs):
super().__init__(*args, **kwargs)
self.delay_in_comm = delay_in_comm
async def read(self, *args, **kwargs):
await asyncio.sleep(self.delay_in_comm)
return await super().read(*args, **kwargs)
async def write(self, *args, **kwargs):
await asyncio.sleep(self.delay_in_comm)
res = await super(type(self), self).write(*args, **kwargs)
return res
class SlowConnector(tcp.TCPConnector):
comm_class = SlowComm
class SlowBackend(tcp.TCPBackend):
def get_connector(self):
return SlowConnector()
monkeypatch.setitem(backends, "tcp", SlowBackend())
listener = await listen("tcp://127.0.0.1:1234", echo)
try:
comm = await connect(listener.contact_address)
await comm.write(b"test")
msg = await comm.read()
assert msg == b"test"
import dask
with dask.config.set({"distributed.comm.timeouts.connect": "100ms"}):
with pytest.raises(
IOError, match="Timed out during handshake while connecting to"
):
await connect(listener.contact_address)
finally:
listener.stop()
async def check_connect_timeout(addr):
t1 = time()
with pytest.raises(IOError):
await connect(addr, timeout=0.15)
dt = time() - t1
assert 1 >= dt >= 0.1
@pytest.mark.asyncio
async def test_tcp_connect_timeout(tcp):
await check_connect_timeout("tcp://127.0.0.1:44444")
@pytest.mark.asyncio
async def test_inproc_connect_timeout():
await check_connect_timeout(inproc.new_address())
async def check_many_listeners(addr):
async def handle_comm(comm):
pass
listeners = []
N = 100
for i in range(N):
listener = await listen(addr, handle_comm)
listeners.append(listener)
assert len({l.listen_address for l in listeners}) == N
assert len({l.contact_address for l in listeners}) == N
for listener in listeners:
listener.stop()
@pytest.mark.asyncio
async def test_tcp_many_listeners(tcp):
await check_many_listeners("tcp://127.0.0.1")
await check_many_listeners("tcp://0.0.0.0")
await check_many_listeners("tcp://")
@pytest.mark.asyncio
async def test_inproc_many_listeners():
await check_many_listeners("inproc://")
#
# Test deserialization
#
async def check_listener_deserialize(addr, deserialize, in_value, check_out):
q = asyncio.Queue()
async def handle_comm(comm):
try:
msg = await comm.read()
except Exception as exc:
q.put_nowait(exc)
else:
q.put_nowait(msg)
await comm.close()
async with listen(addr, handle_comm, deserialize=deserialize) as listener:
comm = await connect(listener.contact_address)
await comm.write(in_value)
out_value = await q.get()
if isinstance(out_value, Exception):
raise out_value # Prevents deadlocks, get actual deserialization exception
check_out(out_value)
await comm.close()
async def check_connector_deserialize(addr, deserialize, in_value, check_out):
done = asyncio.Event()
async def handle_comm(comm):
await comm.write(in_value)
await done.wait()
await comm.close()
async with listen(addr, handle_comm) as listener:
comm = await connect(listener.contact_address, deserialize=deserialize)
out_value = await comm.read()
done.set()
await comm.close()
check_out(out_value)
async def check_deserialize(addr):
"""
Check the "deserialize" flag on connect() and listen().
"""
# Test with Serialize and Serialized objects
msg = {
"op": "update",
"x": b"abc",
"to_ser": [to_serialize(123)],
"ser": Serialized(*serialize(456)),
}
msg_orig = msg.copy()
def check_out_false(out_value):
# Check output with deserialize=False
out_value = out_value.copy() # in case transport passed the object as-is
to_ser = out_value.pop("to_ser")
ser = out_value.pop("ser")
expected_msg = msg_orig.copy()
del expected_msg["ser"]
del expected_msg["to_ser"]
assert out_value == expected_msg
assert isinstance(ser, Serialized)
assert deserialize(ser.header, ser.frames) == 456
assert isinstance(to_ser, (tuple, list)) and len(to_ser) == 1
(to_ser,) = to_ser
# The to_serialize() value could have been actually serialized
# or not (it's a transport-specific optimization)
if isinstance(to_ser, Serialized):
assert deserialize(to_ser.header, to_ser.frames) == 123
else:
assert to_ser == to_serialize(123)
def check_out_true(out_value):
# Check output with deserialize=True
expected_msg = msg.copy()
expected_msg["ser"] = 456
expected_msg["to_ser"] = [123]
# Notice, we allow "to_ser" to be a tuple or a list
assert list(out_value.pop("to_ser")) == expected_msg.pop("to_ser")
assert out_value == expected_msg
await check_listener_deserialize(addr, False, msg, check_out_false)
await check_connector_deserialize(addr, False, msg, check_out_false)
await check_listener_deserialize(addr, True, msg, check_out_true)
await check_connector_deserialize(addr, True, msg, check_out_true)
# Test with long bytestrings, large enough to be transferred
# as a separate payload
# TODO: currently bytestrings are not transferred as a separate payload
_uncompressible = os.urandom(1024 ** 2) * 4  # end size: 4 MB
msg = {
"op": "update",
"x": _uncompressible,
"to_ser": (to_serialize(_uncompressible),),
"ser": Serialized(*serialize(_uncompressible)),
}
msg_orig = msg.copy()
def check_out(deserialize_flag, out_value):
# Check output with deserialize=False
assert sorted(out_value) == sorted(msg_orig)
out_value = out_value.copy() # in case transport passed the object as-is
to_ser = out_value.pop("to_ser")
ser = out_value.pop("ser")
expected_msg = msg_orig.copy()
del expected_msg["ser"]
del expected_msg["to_ser"]
assert out_value == expected_msg
if deserialize_flag:
assert isinstance(ser, (bytes, bytearray))
assert bytes(ser) == _uncompressible
else:
assert isinstance(ser, Serialized)
assert deserialize(ser.header, ser.frames) == _uncompressible
assert isinstance(to_ser, tuple) and len(to_ser) == 1
(to_ser,) = to_ser
# The to_serialize() value could have been actually serialized
# or not (it's a transport-specific optimization)
if isinstance(to_ser, Serialized):
assert deserialize(to_ser.header, to_ser.frames) == _uncompressible
else:
assert to_ser == to_serialize(_uncompressible)
await check_listener_deserialize(addr, False, msg, partial(check_out, False))
await check_connector_deserialize(addr, False, msg, partial(check_out, False))
await check_listener_deserialize(addr, True, msg, partial(check_out, True))
await check_connector_deserialize(addr, True, msg, partial(check_out, True))
@pytest.mark.asyncio
async def test_tcp_deserialize(tcp):
await check_deserialize("tcp://")
@pytest.mark.asyncio
async def test_inproc_deserialize():
await check_deserialize("inproc://")
async def check_deserialize_roundtrip(addr):
"""
Sanity check round-tripping with "deserialize" on and off.
"""
# Test with long bytestrings, large enough to be transferred
# as a separate payload
_uncompressible = os.urandom(1024 ** 2) * 4 # end size: 4 MB
msg = {
"op": "update",
"x": _uncompressible,
"to_ser": [to_serialize(_uncompressible)],
"ser": Serialized(*serialize(_uncompressible)),
}
for should_deserialize in (True, False):
a, b = await get_comm_pair(addr, deserialize=should_deserialize)
await a.write(msg)
got = await b.read()
await b.write(got)
got = await a.read()
assert sorted(got) == sorted(msg)
for k in ("op", "x"):
assert got[k] == msg[k]
if should_deserialize:
assert isinstance(got["to_ser"][0], (bytes, bytearray))
assert isinstance(got["ser"], (bytes, bytearray))
else:
assert isinstance(got["to_ser"][0], (to_serialize, Serialized))
assert isinstance(got["ser"], Serialized)
@pytest.mark.asyncio
async def test_inproc_deserialize_roundtrip():
await check_deserialize_roundtrip("inproc://")
@pytest.mark.asyncio
async def test_tcp_deserialize_roundtrip(tcp):
await check_deserialize_roundtrip("tcp://")
def _raise_eoferror():
raise EOFError
class _EOFRaising:
def __reduce__(self):
return _raise_eoferror, ()
async def check_deserialize_eoferror(addr):
"""
EOFError when deserializing should close the comm.
"""
async def handle_comm(comm):
await comm.write({"data": to_serialize(_EOFRaising())})
with pytest.raises(CommClosedError):
await comm.read()
async with listen(addr, handle_comm) as listener:
comm = await connect(listener.contact_address, deserialize=deserialize)
with pytest.raises(CommClosedError):
await comm.read()
@pytest.mark.asyncio
async def test_tcp_deserialize_eoferror(tcp):
await check_deserialize_eoferror("tcp://")
#
# Test various properties
#
async def check_repr(a, b):
assert "closed" not in repr(a)
assert "closed" not in repr(b)
await a.close()
assert "closed" in repr(a)
assert a.local_address in repr(a)
assert b.peer_address in repr(a)
await b.close()
assert "closed" in repr(b)
assert a.local_address in repr(b)
assert b.peer_address in repr(b)
@pytest.mark.asyncio
async def test_tcp_repr(tcp):
a, b = await get_tcp_comm_pair()
assert a.local_address in repr(b)
assert b.local_address in repr(a)
await check_repr(a, b)
@pytest.mark.asyncio
async def test_tls_repr(tcp):
a, b = await get_tls_comm_pair()
assert a.local_address in repr(b)
assert b.local_address in repr(a)
await check_repr(a, b)
@pytest.mark.asyncio
async def test_inproc_repr():
a, b = await get_inproc_comm_pair()
assert a.local_address in repr(b)
assert b.local_address in repr(a)
await check_repr(a, b)
async def check_addresses(a, b):
assert a.peer_address == b.local_address
assert a.local_address == b.peer_address
a.abort()
b.abort()
@pytest.mark.asyncio
async def test_tcp_adresses(tcp):
a, b = await get_tcp_comm_pair()
await check_addresses(a, b)
@pytest.mark.asyncio
async def test_tls_adresses(tcp):
a, b = await get_tls_comm_pair()
await check_addresses(a, b)
@pytest.mark.asyncio
async def test_inproc_adresses():
a, b = await get_inproc_comm_pair()
await check_addresses(a, b)
def test_register_backend_entrypoint():
# Code adapted from pandas backend entry point testing
# https://github.com/pandas-dev/pandas/blob/2470690b9f0826a8feb426927694fa3500c3e8d2/pandas/tests/plotting/test_backend.py#L50-L76
dist = pkg_resources.get_distribution("distributed")
if dist.module_path not in distributed.__file__:
# We are running from a non-installed distributed, and this test is invalid
pytest.skip("Testing a non-installed distributed")
mod = types.ModuleType("dask_udp")
mod.UDPBackend = lambda: 1
sys.modules[mod.__name__] = mod
entry_point_name = "distributed.comm.backends"
backends_entry_map = pkg_resources.get_entry_map("distributed")
if entry_point_name not in backends_entry_map:
backends_entry_map[entry_point_name] = dict()
backends_entry_map[entry_point_name]["udp"] = pkg_resources.EntryPoint(
"udp", mod.__name__, attrs=["UDPBackend"], dist=dist
)
# The require is disabled here since particularly unit tests may install
# dirty or dev versions which are conflicting with backend entrypoints if
# they are demanding for exact, stable versions. This should not fail the
# test
result = get_backend("udp", require=False)
assert result == 1
| 29.171863
| 134
| 0.649748
|
c4d0693a17d9d19cab8a1932f2dcca210fbfb618
| 3,343
|
py
|
Python
|
activelo/examples/suggestions.py
|
jzf2101/boardlaw
|
29126c2a6ab7f11154fb242c303d3b11f1566201
|
[
"MIT"
] | 20
|
2021-01-20T17:15:18.000Z
|
2022-01-25T21:51:29.000Z
|
activelo/examples/suggestions.py
|
jzf2101/boardlaw
|
29126c2a6ab7f11154fb242c303d3b11f1566201
|
[
"MIT"
] | 17
|
2021-01-21T08:14:11.000Z
|
2021-06-09T22:27:00.000Z
|
activelo/examples/suggestions.py
|
jzf2101/boardlaw
|
29126c2a6ab7f11154fb242c303d3b11f1566201
|
[
"MIT"
] | 3
|
2021-02-15T05:18:41.000Z
|
2021-06-30T14:11:26.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from rebar import arrdict
import torch
import torch.distributions
import activelo
def winrate(black, white):
return 1/(1 + np.exp(-(black - white)))
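# Worked example of the logistic above: a one-point rating edge for black
# gives winrate(1.0, 0.0) = 1 / (1 + exp(-1)) ≈ 0.731.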
def residual_vs_mean(Σ):
return np.diag(Σ - np.outer(Σ.mean(0), Σ.mean(0))/Σ.mean())
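# Reading of the formula above: entry i is Σ_ii - Cov(x_i, x̄)² / Var(x̄),
# i.e. (under the usual Gaussian reading of Σ) the variance of agent i's
# rating that remains after conditioning on the mean rating x̄.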
def resid_var(ranks, truth):
return (((truth - truth.mean()) - (ranks - ranks.mean()))**2).sum()/((truth - truth.mean())**2).sum()
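# resid_var is the squared error between the centered estimated ranks and the
# centered true ratings, normalized by the variance of the true ratings: the
# fraction of true-rating variance that the estimate fails to explain.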
def status(soln):
return f'{soln.σresid:.2f}σd, {soln.resid_var:.0%} resid var'
def plot(trace, truth=None, t=-1):
soln = trace[t]
N = len(soln.μ)
fig, axes = plt.subplots(1, 3)
fig.set_size_inches(18, 6)
ax = axes[0]
μd = soln.μ - soln.μ.mean()
σd = residual_vs_mean(soln.Σ)**.5
ax.errorbar(np.arange(N), μd, yerr=σd, marker='.', linestyle='', capsize=2)
if truth is not None:
ax.plot(truth - truth.mean())
ax.set_title(f'μ±σ vs. mean agent')
ax.set_xlabel(status(soln))
ax = axes[1]
eigs = np.linalg.eigvalsh(soln.Σ)
ax.plot(eigs)
ax.set_yscale('log')
ax.set_title(f'Σ spectrum')
ax.set_xlabel(f'condition: {eigs.max()/eigs.min():.0G}')
ax = axes[2]
T = len(trace) if isinstance(trace, list) else len(trace.μ)
ax.imshow(soln.n, cmap='Greens')
ax.set_title('games played')
ax.set_xlabel(f'{T} rounds, {int(soln.n.sum()/(2*T))} games per')
return fig
def simulate(truth, n_games=256, σresid_tol=.1):
n_agents = len(truth)
wins = torch.zeros((n_agents, n_agents))
games = torch.zeros((n_agents, n_agents))
trace = []
ranks = torch.full((n_agents,), 0.)
while True:
soln = activelo.solve(games, wins)
ranks = torch.as_tensor(soln.μ)
black, white = activelo.suggest(soln)
black_wins = torch.distributions.Binomial(n_games, winrate(truth[black], truth[white])).sample()
wins[black, white] += black_wins
wins[white, black] += n_games - black_wins
games[black, white] += n_games
games[white, black] += n_games
soln['n'] = games.clone()
soln['w'] = wins.clone()
soln['σresid'] = residual_vs_mean(soln.Σ).mean()**.5
soln['resid_var'] = resid_var(ranks, truth)
trace.append(arrdict.arrdict({k: v for k, v in soln.items() if k != 'trace'}))
plt.close()
from IPython import display
display.clear_output(wait=True)
display.display(plot(trace, truth))
if soln.σresid < σresid_tol:
break
trace = arrdict.stack(trace)
return trace
def linear_ranks(n_agents=10):
return torch.linspace(1, 5, n_agents).float()
def log_ranks(n_agents=10):
return torch.linspace(1, 50, n_agents).float().log()
def pow_ranks(n_agents=10, pow=.5):
return torch.linspace(1, 50, n_agents).float().pow(pow)
def simulate_log_ranks():
truth = log_ranks(10)
trace = simulate(truth)
plot(trace, truth)
def random_ranks(n_agents=10):
deltas = torch.randn((n_agents,))/n_agents**.5
totals = deltas.cumsum(0)
totals = totals - totals.min()
return torch.sort(totals).values
def simulate_random_ranks():
counts = []
for _ in range(100):
ranks = random_ranks(n_agents=10)
trace = simulate(ranks)
counts.append(len(trace.n))
q = np.quantile(counts, .95)
return q
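# simulate_random_ranks returns the 95th-percentile number of suggestion
# rounds needed across 100 random rating curves, a rough worst-case budget
# for the active-Elo loop above.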
| 29.584071
| 105
| 0.62429
|
766d98dda5296612a02e04f8a5aa3003c52d46c9
| 2,312
|
py
|
Python
|
examples/tensorflow/mnist/run_preprocess_mnist.py
|
graykode/cella
|
b17859976becd1fca30a0ea897928a08157d22a2
|
[
"Apache-2.0"
] | 71
|
2020-07-16T10:04:40.000Z
|
2022-02-11T13:26:55.000Z
|
examples/tensorflow/mnist/run_preprocess_mnist.py
|
graykode/cella
|
b17859976becd1fca30a0ea897928a08157d22a2
|
[
"Apache-2.0"
] | 16
|
2020-08-10T19:24:16.000Z
|
2022-02-10T02:22:56.000Z
|
examples/tensorflow/mnist/run_preprocess_mnist.py
|
graykode/cella
|
b17859976becd1fca30a0ea897928a08157d22a2
|
[
"Apache-2.0"
] | 5
|
2020-08-12T02:43:16.000Z
|
2021-10-03T18:46:13.000Z
|
# Copyright 2020-present Tae Hwan Jung
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import argparse
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras import datasets
from matorage import *
def normalize_img(image, label):
"""Normalizes images: `uint8` -> `float32`."""
return tf.cast(image, tf.float32) / 255.0, label
def data_save(dataset, evaluate=False):
data_config = DataConfig(
endpoint="127.0.0.1:9000",
access_key="minio",
secret_key="miniosecretkey",
dataset_name="mnist",
additional={
"mode": "train" if not evaluate else "test",
"framework": "tensorflow",
},
attributes=[("image", "float32", (28, 28)), ("target", "int64", (1)),],
)
total_dataset = len(dataset[0])
dataset = tf.data.Dataset.from_tensor_slices(dataset)
dataset = dataset.map(
normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
dataset = dataset.batch(60).prefetch(tf.data.experimental.AUTOTUNE)
data_saver = DataSaver(config=data_config, refresh=True)
for (image, target) in tqdm(dataset, total=total_dataset // 60):
data_saver({"image": image, "target": target})
data_saver.disconnect()
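# Batch-count sanity check: with the standard MNIST split (60,000 train and
# 10,000 test images) and the batch size of 60 used above, the loop writes
# 1,000 batches for the train split and 167 for the test split (the last one
# partial).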
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tensorflow V2 MNIST Example")
parser.add_argument("--train", action="store_true", default=False)
parser.add_argument("--test", action="store_true", default=False)
args = parser.parse_args()
train_dataset, test_dataset = datasets.mnist.load_data()
start = time.time()
if args.train:
data_save(train_dataset, evaluate=False)
if args.test:
data_save(test_dataset, evaluate=True)
end = time.time()
print(end - start)
| 31.671233
| 79
| 0.688581
|
668a999d03e6fa16982a5c337bd77549cf9ba245
| 5,902
|
py
|
Python
|
bot/pilot.py
|
abbenson/ballbot
|
44ea6abba026ec47c2dcd79450186d9ea43a9e7d
|
[
"BSD-2-Clause"
] | null | null | null |
bot/pilot.py
|
abbenson/ballbot
|
44ea6abba026ec47c2dcd79450186d9ea43a9e7d
|
[
"BSD-2-Clause"
] | null | null | null |
bot/pilot.py
|
abbenson/ballbot
|
44ea6abba026ec47c2dcd79450186d9ea43a9e7d
|
[
"BSD-2-Clause"
] | null | null | null |
"""Autonomous control client that solves IEEE Hardware Competition 2014.
UPDATE (Vijay - 9/27/15): Pilot does not work since the last year's
code has been erased. This file is here for reference and is expected to
be modified for IEEE Hardware Competition 2016.
Please do not instantiate Pilot before it is fixed.
"""
import sys
import time
import lib.lib as lib
import client.ctrl_client as ctrl_client_mod
import client.sub_client as sub_client_mod
class Pilot:
"""Autonomous control client based on comprehensive state machine."""
def __init__(self,
ctrl_addr="tcp://127.0.0.1:60000",
sub_addr="tcp://127.0.0.1:60001"):
# Get config, build logger
self.config = lib.get_config()
self.logger = lib.get_logger()
# Build control client
try:
self.ctrl_client = ctrl_client_mod.CtrlClient(ctrl_addr)
except Exception as e:
self.logger.error("Couldn't build CtrlClient; ctrl_addr: {},"
" error: {}".format(ctrl_addr, e))
sys.exit(-1)
# Build sub client
try:
self.sub_client = sub_client_mod.SubClient(sub_addr)
except Exception as e:
self.logger.error("Couldn't build SubClient; sub_addr: {},"
" error: {}".format(sub_addr, e))
sys.exit(-1)
# Initialize other members
self.ITEM_BACKUP_TIME = 0.2
# Order in which activities are solved.
self.acts = ["simon", "etch", "rubiks", "card"]
def call(self, obj_name, method_name, param_dict=dict()):
"""Light wrapper around ctrl_client to handle result unpacking."""
result = self.ctrl_client.call(obj_name, method_name, param_dict)
if result['type'] == 'error':
self.logger.error("API call error: {}".format(result['msg']))
return None
else:
return result['call_return']
def bail(self, msg):
"""Log error message and exit cleanly, stopping all systems.
:param msg: Error message to log.
:type msg: string
"""
self.logger.error("Can't handle follower result: {}".format(msg))
self.call('ctrl', 'stop_full')
sys.exit(1)
def move(self, speed, angle):
self.call('driver', 'move',
{'speed': speed, 'angle': angle})
def drive(self, speed, angle, duration):
return self.call('driver', 'drive',
{'speed': speed, 'angle': angle,
'duration': duration})
def wait_for_start(self):
"""Waits for color sensor to say it sees start signal.
"""
return self.call('color_sensor', 'watch_for_not_color',
{'color': 'red', "timeout": 180})
def follow(self):
"""Helper function for calling line_follower.
Will kick out at intersection.
"""
dir_of_intersection = \
self.call('follower', 'analog_state')
return dir_of_intersection
def rotate_90(self, direction):
"""call on driver api with whatever args are needed
Pass either "cc" or "c".
"""
return self.call('driver', 'rough_rotate_90',
{'direction': direction})
def solve_activity(self, activity):
"""pass name of activity to solve, will fix as needed.
Choices are:
etch, rubiks, simon, card
"""
return self.call(activity, 'solve')
def follow_ignoring_turns(self):
return self.call('follower', 'follow_ignoring_turns')
def find_dir_of_turn(self):
return self.call('follower', 'find_dir_of_turn')
def find_dir_of_int(self):
return self.call('follower', 'find_dir_of_int')
def rotate_to_line(self, direction):
return self.call('follower', 'rotate_to_line',
{'direction': direction})
def wait_for_ready(self):
return self.call('color_sensor', 'wait_for_ready')
def run(self):
"""Main pilot interface with outside world.
start script will call, and pilot will handle all other logic.
"""
# wait for Start signal to indicate time to run.
# self.wait_for_start()
time.sleep(10)
self.wait_for_ready()
self.drive(40, 0, 0.7) # Leave starting block
for activity in self.acts:
# Follow to intersection.
self.follow_ignoring_turns()
# keep track of direction of branch for returning to main path.
act_dir = self.find_dir_of_int()
self.rotate_to_line(act_dir)
# go to act.
self.follow_ignoring_turns()
# Activities we aren't solving.
if activity == 'card':
self.drive(40, 0, 0.2)
time.sleep(1)
self.drive(40, 180, 0.2)
elif activity == 'simon':
self.logger.debug("Not doing simon")
else:
self.solve_activity(activity)
# Leave box and return to path.
self.move(40, 180)
time.sleep(self.ITEM_BACKUP_TIME)
self.move(0, 0)
self.rotate_90('right')
time.sleep(0.5)
self.rotate_to_line('right')
# line follow back to path
self.follow_ignoring_turns()
# turn back to path
if act_dir == 'left':
self.rotate_to_line('right')
elif act_dir == 'right':
self.rotate_to_line('left')
else:
# error case but somehow possible.
# Guess turn direction.
self.rotate_to_line('right')
self.follow_ignoring_turns()
self.drive(40, 0, 0.5)
if __name__ == "__main__":
Pilot().run()
| 31.393617
| 75
| 0.569976
|
345993dd20ab0cd74debf64aa7eec8a67522278e
| 20,454
|
py
|
Python
|
hyperband/search.py
|
niyatham/scikit-hyperband
|
a5c694c9bb770c8f9a2f3104c0e4326f504594b1
|
[
"BSD-3-Clause"
] | null | null | null |
hyperband/search.py
|
niyatham/scikit-hyperband
|
a5c694c9bb770c8f9a2f3104c0e4326f504594b1
|
[
"BSD-3-Clause"
] | null | null | null |
hyperband/search.py
|
niyatham/scikit-hyperband
|
a5c694c9bb770c8f9a2f3104c0e4326f504594b1
|
[
"BSD-3-Clause"
] | null | null | null |
"""
=========
Hyperband
=========
This module contains a scikit-learn compatible implementation of the hyperband
algorithm [1]_.
Compared to the civismlext implementation, this supports multimetric scoring,
and the option to turn the last round of hyperband (the randomized search
round) off.
References
----------
.. [1] Li, L., Jamieson, K., DeSalvo, G., Rostamizadeh, A. and Talwalkar, A.,
2017. Hyperband: A novel bandit-based approach to hyperparameter
optimization. The Journal of Machine Learning Research, 18(1),
pp.6765-6816.
"""
import copy
import numpy as np
from scipy.stats import rankdata
from sklearn.utils import check_random_state
from sklearn.model_selection._search import BaseSearchCV, ParameterSampler
__all__ = ['HyperbandSearchCV']
class HyperbandSearchCV(BaseSearchCV):
"""Hyperband search on hyper parameters.
HyperbandSearchCV implements a ``fit`` and a ``score`` method.
It also implements ``predict``, ``predict_proba``, ``decision_function``,
``transform`` and ``inverse_transform`` if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings using the hyperband
algorithm [1]_ .
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the scikit-learn `User Guide
<http://scikit-learn.org/stable/modules/grid_search.html#randomized-parameter-search>`_.
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each sampled parameter setting.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameter names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
resource_param : str, default='n_estimators'
The name of the cost parameter for the estimator ``estimator``
to be fitted. Typically, this is the number of decision trees
``n_estimators`` in an ensemble or the number of iterations
for estimators trained with stochastic gradient descent.
eta : int, default=3
The halving rate of hyperband: at each round of successive halving only
the best 1/eta of the configurations are kept and the rest are discarded.
min_iter : int, default=1
The minimum amount of resource that should be allocated to the cost
parameter ``resource_param`` for a single configuration of the
hyperparameters.
max_iter : int, default=81
The maximum amount of resource that can be allocated to the cost
parameter ``resource_param`` for a single configuration of the
hyperparameters.
skip_last : int, default=0
The number of last rounds to skip. For example, this can be used
to skip the last round of hyperband, which is standard randomized
search. It can also be used to inspect intermediate results,
although warm-starting HyperbandSearchCV is not supported.
scoring : string, callable, list/tuple, dict or None, default: None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's default scorer (if available) is used.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`sklearn.model_selection.StratifiedKFold`
is used. In all other cases, :class:`sklearn.model_selection.KFold` is used.
Refer `User Guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
for the various cross-validation strategies that can be used here.
refit : boolean, or string default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a string denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``HyperbandSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_parameters_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer.
See ``scoring`` parameter to know more about multiple metric
evaluation.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int, RandomState instance or None, optional, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
return_train_score : boolean, optional, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance, the table below
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.8 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.7 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.8 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.9 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'split0_test_score' : [0.8, 0.7, 0.8, 0.9],
'split1_test_score' : [0.82, 0.5, 0.7, 0.78],
'mean_test_score' : [0.81, 0.60, 0.75, 0.82],
'std_test_score' : [0.02, 0.01, 0.03, 0.03],
'rank_test_score' : [2, 4, 3, 1],
'split0_train_score' : [0.8, 0.9, 0.7],
'split1_train_score' : [0.82, 0.5, 0.7],
'mean_train_score' : [0.81, 0.7, 0.7],
'std_train_score' : [0.03, 0.03, 0.04],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.007, 0.06, 0.04, 0.04],
'std_score_time' : [0.001, 0.002, 0.003, 0.005],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
For multi-metric evaluation, this attribute is present only if
``refit`` is specified.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
References
----------
.. [1] Li, L., Jamieson, K., DeSalvo, G., Rostamizadeh, A. and Talwalkar, A.,
2017. Hyperband: A novel bandit-based approach to hyperparameter
optimization. The Journal of Machine Learning Research, 18(1),
pp.6765-6816.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`sklearn.model_selection.GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`sklearn.model_selection.ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions,
resource_param='n_estimators', eta=3, min_iter=1,
max_iter=81, skip_last=0, scoring=None, n_jobs=1,
iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise', return_train_score=False):
self.param_distributions = param_distributions
self.resource_param = resource_param
self.eta = eta
self.min_iter = min_iter
self.max_iter = max_iter
self.skip_last = skip_last
self.random_state = random_state
super(HyperbandSearchCV, self).__init__(
estimator=estimator, scoring=scoring,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
def _run_search(self, evaluate_candidates):
self._validate_input()
s_max = int(np.floor(np.log(self.max_iter / self.min_iter) / np.log(self.eta)))
B = (s_max + 1) * self.max_iter
refit_metric = self.refit if self.multimetric_ else 'score'
random_state = check_random_state(self.random_state)
if self.skip_last > s_max:
raise ValueError('skip_last is higher than the total number of rounds')
for round_index, s in enumerate(reversed(range(s_max + 1))):
n = int(np.ceil(int(B / self.max_iter / (s + 1)) * np.power(self.eta, s)))
# initial number of iterations per config
r = self.max_iter / np.power(self.eta, s)
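# Worked example with the defaults eta=3, min_iter=1, max_iter=81:
# s_max = 4 and B = 405, so the first (most exploratory) bracket starts with
# n = 81 configurations at r = 1 unit of resource_param each, then keeps the
# best third at every step: 27 configs at 3, 9 at 9, 3 at 27, and finally a
# single configuration at the full 81.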
configurations = list(ParameterSampler(param_distributions=self.param_distributions,
n_iter=n,
random_state=random_state))
if self.verbose > 0:
print('Starting bracket {0} (out of {1}) of hyperband'
.format(round_index + 1, s_max + 1))
for i in range((s + 1) - self.skip_last):
n_configs = np.floor(n / np.power(self.eta, i)) # n_i
n_iterations = int(r * np.power(self.eta, i)) # r_i
n_to_keep = int(np.floor(n_configs / self.eta))
if self.verbose > 0:
msg = ('Starting successive halving iteration {0} out of'
' {1}. Fitting {2} configurations, with'
' resource_param {3} set to {4}')
if n_to_keep > 0:
msg += ', and keeping the best {5} configurations.'
msg = msg.format(i + 1, s + 1, len(configurations),
self.resource_param, n_iterations,
n_to_keep)
print(msg)
# Set the cost parameter for every configuration
parameters = copy.deepcopy(configurations)
for configuration in parameters:
configuration[self.resource_param] = n_iterations
results = evaluate_candidates(parameters)
if n_to_keep > 0:
top_configurations = [x for _, x in sorted(zip(results['rank_test_%s' % refit_metric],
results['params']),
key=lambda x: x[0])]
configurations = top_configurations[:n_to_keep]
if self.skip_last > 0:
print('Skipping the last {0} successive halving iterations'
.format(self.skip_last))
def fit(self, X, y=None, groups=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
super().fit(X, y, groups)
s_max = int(np.floor(np.log(self.max_iter / self.min_iter) / np.log(self.eta)))
B = (s_max + 1) * self.max_iter
brackets = []
for round_index, s in enumerate(reversed(range(s_max + 1))):
n = int(np.ceil(int(B / self.max_iter / (s + 1)) * np.power(self.eta, s)))
n_configs = int(sum([np.floor(n / np.power(self.eta, i))
for i in range((s + 1) - self.skip_last)]))
bracket = (round_index + 1) * np.ones(n_configs)
brackets.append(bracket)
self.cv_results_['hyperband_bracket'] = np.hstack(brackets)
return self
def _validate_input(self):
if not isinstance(self.min_iter, int) or self.min_iter <= 0:
raise ValueError('min_iter should be a positive integer, got %s' %
self.min_iter)
if not isinstance(self.max_iter, int) or self.max_iter <= 0:
raise ValueError('max_iter should be a positive integer, got %s' %
self.max_iter)
if self.max_iter < self.min_iter:
raise ValueError('max_iter should be bigger than min_iter, got '
'max_iter=%d and min_iter=%d' % (self.max_iter,
self.min_iter))
if not isinstance(self.skip_last, int) or self.skip_last < 0:
raise ValueError('skip_last should be a non-negative integer, got %s' %
self.skip_last)
if not isinstance(self.eta, int) or not self.eta > 1:
raise ValueError('eta should be an integer greater than 1, got %s' %
self.eta)
if self.resource_param not in self.estimator.get_params().keys():
raise ValueError('resource_param is set to %s, but base_estimator %s '
'does not have a parameter with that name' %
(self.resource_param,
self.estimator.__class__.__name__))
| 43.892704
| 106
| 0.598123
|
5576aabf6585e0ecec0ca80bbe230b776039b556
| 6,763
|
py
|
Python
|
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/dns/dnsnsecrec.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/dns/dnsnsecrec.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/dns/dnsnsecrec.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dnsnsecrec(base_resource) :
def __init__(self) :
self._hostname = None
self._type = None
self._nextnsec = None
self._nextrecs = None
self._ttl = None
self.___count = None
@property
def hostname(self) :
r"""Name of the domain.<br/>Minimum length = 1.
"""
try :
return self._hostname
except Exception as e:
raise e
@hostname.setter
def hostname(self, hostname) :
r"""Name of the domain.<br/>Minimum length = 1
"""
try :
self._hostname = hostname
except Exception as e:
raise e
@property
def type(self) :
r"""Type of records to display. Available settings function as follows:
* ADNS - Display all authoritative address records.
* PROXY - Display all proxy address records.
* ALL - Display all address records.<br/>Possible values = ALL, ADNS, PROXY.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
r"""Type of records to display. Available settings function as follows:
* ADNS - Display all authoritative address records.
* PROXY - Display all proxy address records.
* ALL - Display all address records.<br/>Possible values = ALL, ADNS, PROXY
"""
try :
self._type = type
except Exception as e:
raise e
@property
def nextnsec(self) :
r"""Next nsec record in the chain.
"""
try :
return self._nextnsec
except Exception as e:
raise e
@property
def nextrecs(self) :
r"""An array of record types associated with the nsec record.<br/>Possible values = A, NS, CNAME, SOA, MX, AAAA, SRV, RRSIG, NSEC, DNSKEY, PTR, TXT, NAPTR.
"""
try :
return self._nextrecs
except Exception as e:
raise e
@property
def ttl(self) :
r"""Time to Live (TTL), in seconds, for the record.
"""
try :
return self._ttl
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(dnsnsecrec_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.dnsnsecrec
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.hostname is not None :
return str(self.hostname)
return None
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the dnsnsecrec resources that are configured on netscaler.
"""
try :
if not name :
obj = dnsnsecrec()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = dnsnsecrec()
obj.hostname = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [dnsnsecrec() for _ in range(len(name))]
obj = [dnsnsecrec() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = dnsnsecrec()
obj[i].hostname = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
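# Usage sketch (assumes the standard nitro_service client shipped with this
# SDK; the host and credentials below are placeholders):
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("10.0.0.1", "http")
#   client.login("nsroot", "password")
#   all_recs = dnsnsecrec.get(client)                  # every NSEC record
#   one_rec = dnsnsecrec.get(client, "example.com")    # a single domain
#   client.logout()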
@classmethod
def get_args(cls, client, args) :
r""" Use this API to fetch all the dnsnsecrec resources that are configured on netscaler.
# This uses dnsnsecrec_args which is a way to provide additional arguments while fetching the resources.
"""
try :
obj = dnsnsecrec()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of dnsnsecrec resources.
The filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = dnsnsecrec()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the dnsnsecrec resources configured on NetScaler.
"""
try :
obj = dnsnsecrec()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of dnsnsecrec resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = dnsnsecrec()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Type:
ALL = "ALL"
ADNS = "ADNS"
PROXY = "PROXY"
class Nextrecs:
A = "A"
NS = "NS"
CNAME = "CNAME"
SOA = "SOA"
MX = "MX"
AAAA = "AAAA"
SRV = "SRV"
RRSIG = "RRSIG"
NSEC = "NSEC"
DNSKEY = "DNSKEY"
PTR = "PTR"
TXT = "TXT"
NAPTR = "NAPTR"
class dnsnsecrec_response(base_response) :
def __init__(self, length=1) :
self.dnsnsecrec = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.dnsnsecrec = [dnsnsecrec() for _ in range(length)]
| 27.052
| 157
| 0.686382
|
545e005ee35d86e386166bb61ac29a671bfdbcb1
| 430
|
py
|
Python
|
vit/formatter/uda_indicator.py
|
kinifwyne/vit
|
e2cbafce922b1e09c4a66e7dc9592c51fe628e9d
|
[
"MIT"
] | 179
|
2020-07-28T08:21:51.000Z
|
2022-03-30T21:39:37.000Z
|
vit/formatter/uda_indicator.py
|
kinifwyne/vit
|
e2cbafce922b1e09c4a66e7dc9592c51fe628e9d
|
[
"MIT"
] | 255
|
2017-02-01T11:49:12.000Z
|
2020-07-26T22:31:25.000Z
|
vit/formatter/uda_indicator.py
|
kinifwyne/vit
|
e2cbafce922b1e09c4a66e7dc9592c51fe628e9d
|
[
"MIT"
] | 26
|
2017-01-17T20:31:13.000Z
|
2020-06-17T13:09:01.000Z
|
from vit.formatter import Formatter
class UdaIndicator(Formatter):
def format(self, value, task):
if not value:
return self.markup_none(self.colorize())
else:
indicator = self.formatter.indicator_uda[self.column]
return (len(indicator), (self.colorize(value), indicator))
def colorize(self, value=None):
return self.colorizer.uda_indicator(self.column, value)
| 33.076923
| 70
| 0.665116
|
fd64ede867a6b709ab8fa1a73e0eb6214b2ec612
| 30,976
|
py
|
Python
|
src/tests/ast_test.py
|
jtolds/pants-lang
|
33d0a4238598af12068f650edb6d72766ca180ec
|
[
"MIT"
] | 5
|
2015-04-28T02:13:22.000Z
|
2017-11-25T07:41:21.000Z
|
src/tests/ast_test.py
|
jtolds/pants-lang
|
33d0a4238598af12068f650edb6d72766ca180ec
|
[
"MIT"
] | null | null | null |
src/tests/ast_test.py
|
jtolds/pants-lang
|
33d0a4238598af12068f650edb6d72766ca180ec
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2012, JT Olds <hello@jtolds.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Pants
http://www.pants-lang.org/
AST tests
"""
__author__ = "JT Olds"
__author_email__ = "hello@jtolds.com"
from unittest import TestCase
from ast.parse import parse
from common.errors import ParserError
class ParsingTests(TestCase):
def testSimpleParse(self):
self.assertEqual(repr(parse("hey. there")),
"Program(["
"Application(["
"Term(Variable('hey', 1, 1), [OpenCall(1, 4)], 1, 1), "
"Term(Variable('there', 1, 6), [], 1, 6)"
"], 1, 1)"
"], 1, 1)")
def testArrayVsIndex(self):
self.assertEqual(repr(parse("call. thing [value]")),
"Program(["
"Application(["
"Term(Variable('call', 1, 1), [OpenCall(1, 5)], 1, 1), "
"Term(Variable('thing', 1, 7), [], 1, 7), "
"Term(Array(["
"Application(["
"Term(Variable('value', 1, 14), [], 1, 14)"
"], 1, 14)"
"], 1, 13), [], 1, 13)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("call. thing[key]")),
"Program(["
"Application(["
"Term(Variable('call', 1, 1), [OpenCall(1, 5)], 1, 1), "
"Term(Variable('thing', 1, 7), [Index(["
"Application(["
"Term(Variable('key', 1, 13), [], 1, 13)"
"], 1, 13)"
"], 1, 12)], 1, 7)"
"], 1, 1)"
"], 1, 1)")
def testCallVsField(self):
self.assertEqual(repr(parse("call. thing1. notafield")),
"Program(["
"Application(["
"Term(Variable('call', 1, 1), [OpenCall(1, 5)], 1, 1), "
"Term(Variable('thing1', 1, 7), [OpenCall(1, 13)], 1, 7), "
"Term(Variable('notafield', 1, 15), [], 1, 15)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("call. thing1.afield")),
"Program(["
"Application(["
"Term(Variable('call', 1, 1), [OpenCall(1, 5)], 1, 1), "
"Term(Variable('thing1', 1, 7), [Field('afield', 1, 13)], 1, 7)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("call. function..afield")),
"Program(["
"Application(["
"Term(Variable('call', 1, 1), [OpenCall(1, 5)], 1, 1), "
"Term(Variable('function', 1, 7), ["
"OpenCall(1, 15), "
"Field('afield', 1, 16)"
"], 1, 7)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("call. function..afuncfield.")),
"Program(["
"Application(["
"Term(Variable('call', 1, 1), [OpenCall(1, 5)], 1, 1), "
"Term(Variable('function', 1, 7), ["
"OpenCall(1, 15), "
"Field('afuncfield', 1, 16), "
"OpenCall(1, 27)"
"], 1, 7)"
"], 1, 1)"
"], 1, 1)")
def testClosedCall(self):
self.assertEqual(repr(parse("f(arg)")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), ["
"ClosedCall([], ["
"PositionalOutArgument(Application(["
"Term(Variable('arg', 1, 3), [], 1, 3)"
"], 1, 3), 1, 3)"
"], 1, 2)"
"], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("f(arg1, arg2)")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), ["
"ClosedCall([], ["
"PositionalOutArgument(Application(["
"Term(Variable('arg1', 1, 3), [], 1, 3)"
"], 1, 3), 1, 3), "
"PositionalOutArgument(Application(["
"Term(Variable('arg2', 1, 9), [], 1, 9)"
"], 1, 9), 1, 9)"
"], 1, 2)"
"], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("f (arg)")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), [], 1, 1), "
"Term(Subexpression(["
"Application(["
"Term(Variable('arg', 1, 4), [], 1, 4)"
"], 1, 4)"
"], 1, 3), [], 1, 3)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("f.(arg)")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), ["
"OpenCall(1, 2), "
"ClosedCall([], ["
"PositionalOutArgument(Application(["
"Term(Variable('arg', 1, 4), [], 1, 4)"
"], 1, 4), 1, 4)"
"], 1, 3)"
"], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("f. (arg)")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), [OpenCall(1, 2)], 1, 1), "
"Term(Subexpression(["
"Application(["
"Term(Variable('arg', 1, 5), [], 1, 5)"
"], 1, 5)"
"], 1, 4), [], 1, 4)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("f(arg1;arg2)")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), ["
"ClosedCall(["
"PositionalOutArgument(Application(["
"Term(Variable('arg1', 1, 3), [], 1, 3)"
"], 1, 3), 1, 3)"
"], ["
"PositionalOutArgument(Application(["
"Term(Variable('arg2', 1, 8), [], 1, 8)"
"], 1, 8), 1, 8)"
"], 1, 2)"
"], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("f(arg1,arg2;arg3)")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), ["
"ClosedCall(["
"PositionalOutArgument(Application(["
"Term(Variable('arg1', 1, 3), [], 1, 3)"
"], 1, 3), 1, 3), "
"PositionalOutArgument(Application(["
"Term(Variable('arg2', 1, 8), [], 1, 8)"
"], 1, 8), 1, 8)"
"], ["
"PositionalOutArgument(Application(["
"Term(Variable('arg3', 1, 13), [], 1, 13)"
"], 1, 13), 1, 13)"
"], 1, 2)"
"], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("f(arg1,arg2,;arg3)")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), ["
"ClosedCall(["
"PositionalOutArgument(Application(["
"Term(Variable('arg1', 1, 3), [], 1, 3)"
"], 1, 3), 1, 3), "
"PositionalOutArgument(Application(["
"Term(Variable('arg2', 1, 8), [], 1, 8)"
"], 1, 8), 1, 8)"
"], ["
"PositionalOutArgument(Application(["
"Term(Variable('arg3', 1, 14), [], 1, 14)"
"], 1, 14), 1, 14)"
"], 1, 2)"
"], 1, 1)"
"], 1, 1)"
"], 1, 1)")
def testSingleLists(self):
self.assertEqual(repr(parse("[z,]")),
"Program(["
"Application(["
"Term(Array(["
"Application(["
"Term(Variable('z', 1, 2), [], 1, 2)"
"], 1, 2)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{test1:test2,}")),
"Program(["
"Application(["
"Term(Dict(["
"DictDefinition(Application(["
"Term(Variable('test1', 1, 2), [], 1, 2)"
"], 1, 2), Application(["
"Term(Variable('test2', 1, 8), [], 1, 8)"
"], 1, 8), 1, 2)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("f(arg1,)")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), ["
"ClosedCall([], ["
"PositionalOutArgument(Application(["
"Term(Variable('arg1', 1, 3), [], 1, 3)"
"], 1, 3), 1, 3)"
"], 1, 2)"
"], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|a,;| null}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Variable('null', 1, 8), [], 1, 8)"
"], 1, 8)"
"], ["
"RequiredInArgument('a', 1, 3)"
"], [], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|b,| null}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Variable('null', 1, 7), [], 1, 7)"
"], 1, 7)"
"], [], ["
"RequiredInArgument('b', 1, 3)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
def testFunctions(self):
self.assertEqual(repr(parse("{}")),
"Program(["
"Application(["
"Term(Dict([], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertRaisesRegexp(ParserError,
"Error at line 1, column 10: unexpected input at close of function",
parse, "{|| thing:thing}")
self.assertEqual(repr(parse("{thing1:thing2}")),
"Program(["
"Application(["
"Term(Dict(["
"DictDefinition(Application(["
"Term(Variable('thing1', 1, 2), [], 1, 2)"
"], 1, 2), Application(["
"Term(Variable('thing2', 1, 9), [], 1, 9)"
"], 1, 9), 1, 2)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{null}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Variable('null', 1, 2), [], 1, 2)"
"], 1, 2)"
"], [], [], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{{thing1:thing2}}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Dict(["
"DictDefinition(Application(["
"Term(Variable('thing1', 1, 3), [], 1, 3)"
"], 1, 3), Application(["
"Term(Variable('thing2', 1, 10), [], 1, 10)"
"], 1, 10), 1, 3)"
"], 1, 2), [], 1, 2)"
"], 1, 2)"
"], [], [], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|a| print(\"hi\"); null}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Variable('print', 1, 6), ["
"ClosedCall([], ["
"PositionalOutArgument(Application(["
"Term(String(False, 'hi', 1, 12), [], 1, 12)"
"], 1, 12), 1, 12)"
"], 1, 11)"
"], 1, 6)"
"], 1, 6), "
"Application(["
"Term(Variable('null', 1, 19), [], 1, 19)"
"], 1, 19)"
"], [], ["
"RequiredInArgument('a', 1, 3)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|a,| null}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Variable('null', 1, 7), [], 1, 7)"
"], 1, 7)"
"], [], ["
"RequiredInArgument('a', 1, 3)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|a,b| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 8), [], 1, 8)"
"], 1, 8)"
"], [], ["
"RequiredInArgument('a', 1, 3), "
"RequiredInArgument('b', 1, 5)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|a,b,| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 9), [], 1, 9)"
"], 1, 9)"
"], [], ["
"RequiredInArgument('a', 1, 3), "
"RequiredInArgument('b', 1, 5)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|a,b,c:3| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 12), [], 1, 12)"
"], 1, 12)"
"], [], ["
"RequiredInArgument('a', 1, 3), "
"RequiredInArgument('b', 1, 5), "
"DefaultInArgument('c', Application(["
"Term(Integer(3, 1, 9), [], 1, 9)"
"], 1, 9), 1, 7)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|a,b,c:3,d:4,:(opt)| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 23), [], 1, 23)"
"], 1, 23)"
"], [], ["
"RequiredInArgument('a', 1, 3), "
"RequiredInArgument('b', 1, 5), "
"DefaultInArgument('c', Application(["
"Term(Integer(3, 1, 9), [], 1, 9)"
"], 1, 9), 1, 7), "
"DefaultInArgument('d', Application(["
"Term(Integer(4, 1, 13), [], 1, 13)"
"], 1, 13), 1, 11), "
"SplatInArgument('opt', 1, 15)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertRaisesRegexp(ParserError,
r"Error at line 1, column 16: unexpected input for argument list",
parse, "{|a,b,c:3,d:4,q(opt)| 0}")
self.assertEqual(repr(parse("{|| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 5), [], 1, 5)"
"], 1, 5)"
"], [], [], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{||}")),
"Program(["
"Application(["
"Term(Function([], [], [], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|;| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 6), [], 1, 6)"
"], 1, 6)"
"], [], [], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|a;| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 7), [], 1, 7)"
"], 1, 7)"
"], ["
"RequiredInArgument('a', 1, 3)"
"], [], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|a,b;| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 9), [], 1, 9)"
"], 1, 9)"
"], ["
"RequiredInArgument('a', 1, 3), "
"RequiredInArgument('b', 1, 5)"
"], [], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|a,b,;| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 10), [], 1, 10)"
"], 1, 10)"
"], ["
"RequiredInArgument('a', 1, 3), "
"RequiredInArgument('b', 1, 5)"
"], [], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|:(var),a,b,;| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 17), [], 1, 17)"
"], 1, 17)"
"], ["
"SplatInArgument('var', 1, 3), "
"RequiredInArgument('a', 1, 10), "
"RequiredInArgument('b', 1, 12)"
"], [], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|:(var),a,b,d;e,f,g:5,h:7,:(j)| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 34), [], 1, 34)"
"], 1, 34)"
"], ["
"SplatInArgument('var', 1, 3), "
"RequiredInArgument('a', 1, 10), "
"RequiredInArgument('b', 1, 12), "
"RequiredInArgument('d', 1, 14)"
"], ["
"RequiredInArgument('e', 1, 16), "
"RequiredInArgument('f', 1, 18), "
"DefaultInArgument('g', Application(["
"Term(Integer(5, 1, 22), [], 1, 22)"
"], 1, 22), 1, 20), "
"DefaultInArgument('h', Application(["
"Term(Integer(7, 1, 26), [], 1, 26)"
"], 1, 26), 1, 24), "
"SplatInArgument('j', 1, 28)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|:(a);:(b)| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 14), [], 1, 14)"
"], 1, 14)"
"], ["
"SplatInArgument('a', 1, 3)"
"], ["
"SplatInArgument('b', 1, 8)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("{|;::(b)| 0}")),
"Program(["
"Application(["
"Term(Function(["
"Application(["
"Term(Integer(0, 1, 11), [], 1, 11)"
"], 1, 11)"
"], [], ["
"KeywordInArgument('b', 1, 4)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
def testListExpansion(self):
self.assertEqual(repr(parse("f(thing, :(thing))")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), ["
"ClosedCall([], ["
"PositionalOutArgument(Application(["
"Term(Variable('thing', 1, 3), [], 1, 3)"
"], 1, 3), 1, 3), "
"SplatOutArgument(["
"Application(["
"Term(Variable('thing', 1, 12), [], 1, 12)"
"], 1, 12)"
"], 1, 10)"
"], 1, 2)"
"], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertRaisesRegexp(ParserError,
"Error at line 1, column 3: unexpected input for function call",
parse, "f(:,(thing),thing)")
self.assertEqual(repr(parse("f(*,(thing),thing)")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), ["
"ClosedCall([], ["
"PositionalOutArgument(Application(["
"Term(Variable('*', 1, 3), [], 1, 3)"
"], 1, 3), 1, 3), "
"PositionalOutArgument(Application(["
"Term(Subexpression(["
"Application(["
"Term(Variable('thing', 1, 6), [], 1, 6)"
"], 1, 6)"
"], 1, 5), [], 1, 5)"
"], 1, 5), 1, 5), "
"PositionalOutArgument(Application(["
"Term(Variable('thing', 1, 13), [], 1, 13)"
"], 1, 13), 1, 13)"
"], 1, 2)"
"], 1, 1)"
"], 1, 1)"
"], 1, 1)")
def testByteString(self):
self.assertEqual(repr(parse("f. b\"thing\"")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), ["
"OpenCall(1, 2)"
"], 1, 1), "
"Term(String(True, 'thing', 1, 4), [], 1, 4)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("f. b \"thing\"")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), [OpenCall(1, 2)], 1, 1), "
"Term(Variable('b', 1, 4), [], 1, 4), "
"Term(String(False, 'thing', 1, 6), [], 1, 6)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("f. b\"\"")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), [OpenCall(1, 2)], 1, 1), "
"Term(String(True, '', 1, 4), [], 1, 4)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("f. b \"\"")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), [OpenCall(1, 2)], 1, 1), "
"Term(Variable('b', 1, 4), [], 1, 4), "
"Term(String(False, '', 1, 6), [], 1, 6)"
"], 1, 1)"
"], 1, 1)")
def testParses(self):
self.assertRaisesRegexp(ParserError,
r"Error at line 1, column 4: unexpected input",
parse, ".x := 3")
self.assertEqual(repr(parse("x := 1")),
"Program(["
"Assignment(True, VariableAssignee(Variable('x', 1, 1), 1, 1), "
"Application(["
"Term(Integer(1, 1, 6), [], 1, 6)"
"], 1, 6), 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("(a b).thing")),
"Program(["
"Application(["
"Term(Subexpression(["
"Application(["
"Term(Variable('a', 1, 2), [], 1, 2), "
"Term(Variable('b', 1, 4), [], 1, 4)"
"], 1, 2)"
"], 1, 1), ["
"Field('thing', 1, 6)"
"], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("(a b).thing := 3")),
"Program(["
"Assignment(True, FieldAssignee(Term(Subexpression(["
"Application(["
"Term(Variable('a', 1, 2), [], 1, 2), "
"Term(Variable('b', 1, 4), [], 1, 4)"
"], 1, 2)"
"], 1, 1), [], 1, 1), Field('thing', 1, 6), 1, 1), "
"Application(["
"Term(Integer(3, 1, 16), [], 1, 16)"
"], 1, 16), 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("z.thing := 3; x[4] := 5")),
"Program(["
"Assignment("
"True, "
"FieldAssignee("
"Term(Variable('z', 1, 1), [], 1, 1), "
"Field('thing', 1, 2), 1, 1), "
"Application(["
"Term(Integer(3, 1, 12), [], 1, 12)"
"], 1, 12), 1, 1), "
"Assignment("
"True, "
"IndexAssignee("
"Term(Variable('x', 1, 15), [], 1, 15), "
"Index(["
"Application(["
"Term(Integer(4, 1, 17), [], 1, 17)"
"], 1, 17)"
"], 1, 16), 1, 15), "
"Application(["
"Term(Integer(5, 1, 23), [], 1, 23)"
"], 1, 23), 1, 15)"
"], 1, 1)")
self.assertEqual(repr(parse("x = 1")),
"Program(["
"Assignment("
"False, "
"VariableAssignee(Variable('x', 1, 1), 1, 1), "
"Application(["
"Term(Integer(1, 1, 5), [], 1, 5)"
"], 1, 5), 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("(a b).thing = 3")),
"Program(["
"Assignment("
"False, "
"FieldAssignee("
"Term(Subexpression(["
"Application(["
"Term(Variable('a', 1, 2), [], 1, 2), "
"Term(Variable('b', 1, 4), [], 1, 4)"
"], 1, 2)"
"], 1, 1), [], 1, 1), "
"Field('thing', 1, 6), 1, 1), "
"Application(["
"Term(Integer(3, 1, 15), [], 1, 15)"
"], 1, 15), 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("z.thing = 3; x[4] = 5")),
"Program(["
"Assignment("
"False, "
"FieldAssignee("
"Term(Variable('z', 1, 1), [], 1, 1), "
"Field('thing', 1, 2), 1, 1), "
"Application(["
"Term(Integer(3, 1, 11), [], 1, 11)"
"], 1, 11), 1, 1), "
"Assignment("
"False, "
"IndexAssignee("
"Term(Variable('x', 1, 14), [], 1, 14), "
"Index(["
"Application(["
"Term(Integer(4, 1, 16), [], 1, 16)"
"], 1, 16)"
"], 1, 15), 1, 14), "
"Application(["
"Term(Integer(5, 1, 21), [], 1, 21)"
"], 1, 21), 1, 14)"
"], 1, 1)")
def testNewlines(self):
self.assertEqual(repr(parse("f; f")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), [], 1, 1)"
"], 1, 1), "
"Application(["
"Term(Variable('f', 1, 4), [], 1, 4)"
"], 1, 4)"
"], 1, 1)")
self.assertEqual(repr(parse("f\nf")),
"Program(["
"Application(["
"Term(Variable('f', 1, 1), [], 1, 1)"
"], 1, 1), "
"Application(["
"Term(Variable('f', 2, 1), [], 2, 1)"
"], 2, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("hey there; how are you; ")),
"Program(["
"Application(["
"Term(Variable('hey', 1, 1), [], 1, 1), "
"Term(Variable('there', 1, 5), [], 1, 5)"
"], 1, 1), "
"Application(["
"Term(Variable('how', 1, 12), [], 1, 12), "
"Term(Variable('are', 1, 16), [], 1, 16), "
"Term(Variable('you', 1, 20), [], 1, 20)"
"], 1, 12)"
"], 1, 1)")
self.assertEqual(repr(parse("hey there\nhow are you\n")),
"Program(["
"Application(["
"Term(Variable('hey', 1, 1), [], 1, 1), "
"Term(Variable('there', 1, 5), [], 1, 5)"
"], 1, 1), "
"Application(["
"Term(Variable('how', 2, 1), [], 2, 1), "
"Term(Variable('are', 2, 5), [], 2, 5), "
"Term(Variable('you', 2, 9), [], 2, 9)"
"], 2, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("(f\nf)")),
"Program(["
"Application(["
"Term(Subexpression(["
"Application(["
"Term(Variable('f', 1, 2), [], 1, 2), "
"Term(Variable('f', 2, 1), [], 2, 1)"
"], 1, 2)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("(f;f)")),
"Program(["
"Application(["
"Term(Subexpression(["
"Application(["
"Term(Variable('f', 1, 2), [], 1, 2)"
"], 1, 2), "
"Application(["
"Term(Variable('f', 1, 4), [], 1, 4)"
"], 1, 4)"
"], 1, 1), [], 1, 1)"
"], 1, 1)"
"], 1, 1)")
def testFloatingPoint(self):
self.assertEqual(repr(parse("x = 3")),
"Program(["
"Assignment("
"False, "
"VariableAssignee(Variable('x', 1, 1), 1, 1), "
"Application(["
"Term(Integer(3, 1, 5), [], 1, 5)"
"], 1, 5), 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("x = 3.0")),
"Program(["
"Assignment("
"False, "
"VariableAssignee(Variable('x', 1, 1), 1, 1), "
"Application(["
"Term(Float(3.0, 1, 5), [], 1, 5)"
"], 1, 5), 1, 1)"
"], 1, 1)")
def testEquality(self):
self.assertEqual(repr(parse("x <. 3")),
"Program(["
"Application(["
"Term(Variable('x', 1, 1), [], 1, 1), "
"Term(Variable('<', 1, 3), [OpenCall(1, 4)], 1, 3), "
"Term(Integer(3, 1, 6), [], 1, 6)"
"], 1, 1)"
"], 1, 1)")
self.assertEqual(repr(parse("x ==. 3")),
"Program(["
"Application(["
"Term(Variable('x', 1, 1), [], 1, 1), "
"Term(Variable('==', 1, 3), [OpenCall(1, 5)], 1, 3), "
"Term(Integer(3, 1, 7), [], 1, 7)"
"], 1, 1)"
"], 1, 1)")
def testComments(self):
self.assertEqual(repr(parse("# ||||| this comment shouldn't fail { \n 1 "
"\n")),
"Program(["
"Application(["
"Term(Integer(1, 2, 2), [], 2, 2)"
"], 2, 2)"
"], 1, 1)")
self.assertEqual(repr(parse("# ||||| this comment shouldn't fail { \n "
"# ||||| this comment shouldn't fail { \n 1 \n")),
"Program(["
"Application(["
"Term(Integer(1, 3, 2), [], 3, 2)"
"], 3, 2)"
"], 1, 1)")
self.assertEqual(repr(parse("\n 1 # ||||| this comment shouldn't fail { "
"\n # ||||| this comment shouldn't fail { \n \n")),
"Program(["
"Application(["
"Term(Integer(1, 2, 4), [], 2, 4)"
"], 2, 4)"
"], 1, 1)")
self.assertEqual(repr(parse("\n 1 # ||||| this comment shouldn't fail { "
"\n 2 # ||||| this comment shouldn't fail { \n \n")),
"Program(["
"Application(["
"Term(Integer(1, 2, 4), [], 2, 4)"
"], 2, 4), "
"Application(["
"Term(Integer(2, 3, 2), [], 3, 2)"
"], 3, 2)"
"], 1, 1)")
| 34.804494
| 80
| 0.375
|
aa90ee3ba7a379e9d033b7d1eafc5264d35a4a0c
| 5,044
|
py
|
Python
|
voltha/adapters/adtran_olt/flow/flow_tables.py
|
addyess/voltha
|
50af3d103b045a36db12736ead9064cea7028aff
|
[
"Apache-2.0"
] | null | null | null |
voltha/adapters/adtran_olt/flow/flow_tables.py
|
addyess/voltha
|
50af3d103b045a36db12736ead9064cea7028aff
|
[
"Apache-2.0"
] | null | null | null |
voltha/adapters/adtran_olt/flow/flow_tables.py
|
addyess/voltha
|
50af3d103b045a36db12736ead9064cea7028aff
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flow_entry import FlowEntry
from evc import EVC
class DeviceFlows(object):
""" Tracks existing flows on the device """
def __init__(self):
self._flow_table = dict() # Key = (str)Flow ID, Value = FlowEntry
def __getitem__(self, item):
flow_id = item.flow_id if isinstance(item, FlowEntry) else item
return self._flow_table[flow_id]
def __iter__(self):
for _flow_id, _flow in self._flow_table.items():
yield _flow_id, _flow
def itervalues(self):
for _flow in self._flow_table.values():
yield _flow
def iterkeys(self):
for _id in self._flow_table.keys():
yield _id
def items(self):
return self._flow_table.items()
def values(self):
return self._flow_table.values()
def keys(self):
return self._flow_table.keys()
def __len__(self):
return len(self._flow_table)
def add(self, flow):
assert isinstance(flow, FlowEntry)
if flow.flow_id not in self._flow_table:
self._flow_table[flow.flow_id] = flow
return flow
def get(self, item):
flow_id = item.flow_id if isinstance(item, FlowEntry) else item
return self._flow_table.get(flow_id)
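# Usage sketch (hypothetical names, not from this module): lookups accept either a
# FlowEntry or its flow id, so flows.get(flow_entry) and flows.get(flow_entry.flow_id)
# are equivalent; the same convention applies to __getitem__ and remove below.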
def remove(self, item):
flow_id = item.flow_id if isinstance(item, FlowEntry) else item
return self._flow_table.pop(flow_id, None)
def clear_all(self):
self._flow_table = dict()
class DownstreamFlows(object):
"""
Tracks existing flows that are downstream (NNI as source port)
The downstream table is slightly different than the base DeviceFlows
table as it is used to track flows that will become EVCs. The base
table tracks flows that will be EVC-maps (or related to them).
The downstream table is also indexed by a downstream signature that
is composed as follows:
<dev-id>.<ingress-port-number>.<s-tag>.*
In comparison, the upstream flow signature is similar, but instead of '*' it has the
c-tag (if any).
TODO: Drop device ID from signatures once flow tables are unique to a device handler
"""
def __init__(self):
self._signature_table = dict() # Key = (str)Downstream signature
# |
# +-> downstream-signature
# |
# +-> 'evc' -> EVC
# |
# +-> flow-ids -> flow-entries...
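# Shape sketch (hypothetical values): after add("olt-1.1.100.*") the table holds
# {"olt-1.1.100.*": SignatureTableEntry}, and that entry exposes .evc (an EVC,
# once assigned) and .flows (a DeviceFlows of the related EVC-map flow entries).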
def __getitem__(self, signature):
assert isinstance(signature, str)
return self._signature_table[signature]
def __iter__(self):
for _flow_id, _flow in self._signature_table.items():
yield _flow_id, _flow
def itervalues(self):
for _flow in self._signature_table.values():
yield _flow
def iterkeys(self):
for _id in self._signature_table.keys():
yield _id
def items(self):
return self._signature_table.items()
def values(self):
return self._signature_table.values()
def keys(self):
return self._signature_table.keys()
def __len__(self):
return len(self._signature_table)
def get(self, signature):
assert isinstance(signature, str)
return self._signature_table.get(signature)
def add(self, signature):
"""
Can be called by upstream flow to reserve a slot
"""
assert isinstance(signature, str)
if signature not in self._signature_table:
self._signature_table[signature] = DownstreamFlows.SignatureTableEntry(signature)
return self._signature_table[signature]
def remove(self, signature):
assert isinstance(signature, str)
return self._signature_table.pop(signature)
def clear_all(self):
self._signature_table = dict()
class SignatureTableEntry(object):
def __init__(self, signature):
self._signature = signature
self._evc = None
self._flow_table = DeviceFlows()
@property
def evc(self):
return self._evc
@evc.setter
def evc(self, evc):
assert isinstance(evc, (EVC, type(None)))
self._evc = evc
@property
def flows(self):
return self._flow_table
| 30.756098
| 93
| 0.618557
|
89d46abc887f3b9f316dfb0a7148dc234ea9593a
| 1,668
|
py
|
Python
|
tests/test_helper.py
|
zillionare/backtesting
|
a6272a0e8d86aa01c67c674a1d6ad49d140df472
|
[
"MIT"
] | null | null | null |
tests/test_helper.py
|
zillionare/backtesting
|
a6272a0e8d86aa01c67c674a1d6ad49d140df472
|
[
"MIT"
] | null | null | null |
tests/test_helper.py
|
zillionare/backtesting
|
a6272a0e8d86aa01c67c674a1d6ad49d140df472
|
[
"MIT"
] | null | null | null |
import datetime
import unittest
import numpy as np
from backtest.common.helper import jsonify, tabulate_numpy_array
class HelperTest(unittest.TestCase):
def test_jsonify(self):
class Foo:
def __init__(self):
self.a = 1
self.b = 2
self.c = 3
foo = Foo()
obj = {
"numpy": np.array([0.1, 0.2, 0.3]),
"time": datetime.datetime(2020, 1, 1, 0, 0, 0),
"list": [1, 2, 3],
"dict": {"a": 1, "b": 2},
"str": "hello",
"bool": False,
"None": None,
"structure_array": np.array(
[(1, 2, 3), (4, 5, 6)], dtype=[("a", int), ("b", int), ("c", int)]
),
"foo": foo,
}
actual = jsonify(obj)
exp = {
"numpy": [0.1, 0.2, 0.3],
"time": "2020-01-01T00:00:00",
"list": [1, 2, 3],
"dict": {"a": 1, "b": 2},
"str": "hello",
"bool": False,
"None": None,
"structure_array": [[1, 2, 3], [4, 5, 6]],
"foo": {"a": 1, "b": 2, "c": 3},
}
self.assertDictEqual(actual, exp)
def test_tabulate_numpy_array(self):
arr = np.array(
[(1, 2, 3), (4, 5, 6)], dtype=[("a", int), ("b", int), ("c", int)]
)
actual = tabulate_numpy_array(arr)
exp = [["a", "b", "c"], [1, 2, 3], [4, 5, 6]]
exp = """╒═════╤═════╤═════╕
│ a │ b │ c │
╞═════╪═════╪═════╡
│ 1 │ 2 │ 3 │
├─────┼─────┼─────┤
│ 4 │ 5 │ 6 │
╘═════╧═════╧═════╛"""
self.assertEqual(exp, actual)
| 26.47619
| 82
| 0.375899
|
6a0e9c137e7bc97d364c77047c5c6e29bc5d946a
| 83,828
|
bzl
|
Python
|
go_deps.bzl
|
netsec-ethz/netsec-scion
|
38afcbac6ca32f49a412883fe4369e31419b3f50
|
[
"Apache-2.0"
] | 2
|
2018-01-23T11:42:34.000Z
|
2019-08-26T23:26:31.000Z
|
go_deps.bzl
|
netsec-ethz/netsec-scion
|
38afcbac6ca32f49a412883fe4369e31419b3f50
|
[
"Apache-2.0"
] | 26
|
2017-12-05T14:56:37.000Z
|
2019-11-18T09:58:43.000Z
|
go_deps.bzl
|
netsec-ethz/netsec-scion
|
38afcbac6ca32f49a412883fe4369e31419b3f50
|
[
"Apache-2.0"
] | 6
|
2018-02-02T10:15:35.000Z
|
2019-11-17T14:41:26.000Z
|
# Generated from go.mod by gazelle. DO NOT EDIT
load("@bazel_gazelle//:deps.bzl", "go_repository")
def go_deps():
go_repository(
name = "af_inet_netaddr",
importpath = "inet.af/netaddr",
sum = "h1:tvgqez5ZQoBBiBAGNU/fmJy247yB/7++kcLOEoMYup0=",
version = "v0.0.0-20210903134321-85fa6c94624e",
)
go_repository(
name = "co_honnef_go_tools",
importpath = "honnef.co/go/tools",
sum = "h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=",
version = "v0.0.1-2020.1.4",
)
go_repository(
name = "com_github_alecthomas_template",
importpath = "github.com/alecthomas/template",
sum = "h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=",
version = "v0.0.0-20190718012654-fb15b899a751",
)
go_repository(
name = "com_github_alecthomas_units",
importpath = "github.com/alecthomas/units",
sum = "h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=",
version = "v0.0.0-20190924025748-f65c72e2690d",
)
go_repository(
name = "com_github_anmitsu_go_shlex",
importpath = "github.com/anmitsu/go-shlex",
sum = "h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=",
version = "v0.0.0-20161002113705-648efa622239",
)
go_repository(
name = "com_github_antihax_optional",
importpath = "github.com/antihax/optional",
sum = "h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_antlr_antlr4",
importpath = "github.com/antlr/antlr4",
sum = "h1:yxMh4HIdsSh2EqxUESWvzszYMNzOugRyYCeohfwNULM=",
version = "v0.0.0-20181218183524-be58ebffde8e",
)
go_repository(
name = "com_github_armon_circbuf",
importpath = "github.com/armon/circbuf",
sum = "h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=",
version = "v0.0.0-20150827004946-bbbad097214e",
)
go_repository(
name = "com_github_armon_consul_api",
importpath = "github.com/armon/consul-api",
sum = "h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=",
version = "v0.0.0-20180202201655-eb2c6b5be1b6",
)
go_repository(
name = "com_github_armon_go_metrics",
importpath = "github.com/armon/go-metrics",
sum = "h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=",
version = "v0.0.0-20180917152333-f0300d1749da",
)
go_repository(
name = "com_github_armon_go_radix",
importpath = "github.com/armon/go-radix",
sum = "h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=",
version = "v1.0.0",
)
go_repository(
name = "com_github_azure_go_ansiterm",
importpath = "github.com/Azure/go-ansiterm",
sum = "h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=",
version = "v0.0.0-20170929234023-d6e3b3328b78",
)
go_repository(
name = "com_github_bazelbuild_rules_go",
importpath = "github.com/bazelbuild/rules_go",
sum = "h1:SfxjyO/V68rVnzOHop92fB0gv/Aa75KNLAN0PMqXbIw=",
version = "v0.29.0",
)
go_repository(
name = "com_github_beorn7_perks",
importpath = "github.com/beorn7/perks",
sum = "h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=",
version = "v1.0.1",
)
go_repository(
name = "com_github_bgentry_speakeasy",
importpath = "github.com/bgentry/speakeasy",
sum = "h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=",
version = "v0.1.0",
)
go_repository(
name = "com_github_bketelsen_crypt",
importpath = "github.com/bketelsen/crypt",
sum = "h1:w/jqZtC9YD4DS/Vp9GhWfWcCpuAL58oTnLoI8vE9YHU=",
version = "v0.0.4",
)
go_repository(
name = "com_github_bradfitz_go_smtpd",
importpath = "github.com/bradfitz/go-smtpd",
sum = "h1:ckJgFhFWywOx+YLEMIJsTb+NV6NexWICk5+AMSuz3ss=",
version = "v0.0.0-20170404230938-deb6d6237625",
)
go_repository(
name = "com_github_buger_jsonparser",
importpath = "github.com/buger/jsonparser",
sum = "h1:D21IyuvjDCshj1/qq+pCNd3VZOAEI9jy6Bi131YlXgI=",
version = "v0.0.0-20181115193947-bf1c66bbce23",
)
go_repository(
name = "com_github_buildkite_go_buildkite_v2",
importpath = "github.com/buildkite/go-buildkite/v2",
sum = "h1:aWsgMl3lA6xLSAyAhEO0DaHg7a7GVf/50mqVDC1Zt0A=",
version = "v2.8.1",
)
go_repository(
name = "com_github_burntsushi_toml",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_burntsushi_xgb",
importpath = "github.com/BurntSushi/xgb",
sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=",
version = "v0.0.0-20160522181843-27f122750802",
)
go_repository(
name = "com_github_cenkalti_backoff",
importpath = "github.com/cenkalti/backoff",
sum = "h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=",
version = "v2.2.1+incompatible",
)
go_repository(
name = "com_github_census_instrumentation_opencensus_proto",
importpath = "github.com/census-instrumentation/opencensus-proto",
sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
version = "v0.2.1",
)
go_repository(
name = "com_github_cespare_xxhash",
importpath = "github.com/cespare/xxhash",
sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=",
version = "v1.1.0",
)
go_repository(
name = "com_github_cespare_xxhash_v2",
importpath = "github.com/cespare/xxhash/v2",
sum = "h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=",
version = "v2.1.1",
)
go_repository(
name = "com_github_cheekybits_genny",
importpath = "github.com/cheekybits/genny",
sum = "h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=",
version = "v1.0.0",
)
go_repository(
name = "com_github_chzyer_logex",
importpath = "github.com/chzyer/logex",
sum = "h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=",
version = "v1.1.10",
)
go_repository(
name = "com_github_chzyer_readline",
importpath = "github.com/chzyer/readline",
sum = "h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=",
version = "v0.0.0-20180603132655-2972be24d48e",
)
go_repository(
name = "com_github_chzyer_test",
importpath = "github.com/chzyer/test",
sum = "h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=",
version = "v0.0.0-20180213035817-a1ea475d72b1",
)
go_repository(
name = "com_github_client9_misspell",
importpath = "github.com/client9/misspell",
sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
version = "v0.3.4",
)
go_repository(
name = "com_github_cncf_udpa_go",
importpath = "github.com/cncf/udpa/go",
sum = "h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=",
version = "v0.0.0-20201120205902-5459f2c99403",
)
go_repository(
name = "com_github_cncf_xds_go",
importpath = "github.com/cncf/xds/go",
sum = "h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c=",
version = "v0.0.0-20210312221358-fbca930ec8ed",
)
go_repository(
name = "com_github_codahale_hdrhistogram",
importpath = "github.com/codahale/hdrhistogram",
sum = "h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=",
version = "v0.0.0-20161010025455-3a0bb77429bd",
)
go_repository(
name = "com_github_containerd_containerd",
importpath = "github.com/containerd/containerd",
sum = "h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=",
version = "v1.3.0",
)
go_repository(
name = "com_github_containerd_stargz_snapshotter_estargz",
importpath = "github.com/containerd/stargz-snapshotter/estargz",
sum = "h1:5e7heayhB7CcgdTkqfZqrNaNv15gABwr3Q2jBTbLlt4=",
version = "v0.4.1",
)
go_repository(
name = "com_github_coreos_bbolt",
importpath = "github.com/coreos/bbolt",
sum = "h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=",
version = "v1.3.2",
)
go_repository(
name = "com_github_coreos_etcd",
importpath = "github.com/coreos/etcd",
sum = "h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=",
version = "v3.3.10+incompatible",
)
go_repository(
name = "com_github_coreos_go_semver",
importpath = "github.com/coreos/go-semver",
sum = "h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=",
version = "v0.3.0",
)
go_repository(
name = "com_github_coreos_go_systemd",
importpath = "github.com/coreos/go-systemd",
sum = "h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=",
version = "v0.0.0-20190321100706-95778dfbb74e",
)
go_repository(
name = "com_github_coreos_go_systemd_v22",
importpath = "github.com/coreos/go-systemd/v22",
sum = "h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=",
version = "v22.3.2",
)
go_repository(
name = "com_github_coreos_pkg",
importpath = "github.com/coreos/pkg",
sum = "h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=",
version = "v0.0.0-20180928190104-399ea9e2e55f",
)
go_repository(
name = "com_github_cpuguy83_go_md2man_v2",
importpath = "github.com/cpuguy83/go-md2man/v2",
sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=",
version = "v2.0.0",
)
go_repository(
name = "com_github_creack_pty",
importpath = "github.com/creack/pty",
sum = "h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=",
version = "v1.1.9",
)
go_repository(
name = "com_github_cyberdelia_templates",
importpath = "github.com/cyberdelia/templates",
sum = "h1:/ovYnF02fwL0kvspmy9AuyKg1JhdTRUgPw4nUxd9oZM=",
version = "v0.0.0-20141128023046-ca7fffd4298c",
)
go_repository(
name = "com_github_davecgh_go_spew",
importpath = "github.com/davecgh/go-spew",
sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=",
version = "v1.1.1",
)
go_repository(
name = "com_github_dchest_cmac",
importpath = "github.com/dchest/cmac",
sum = "h1:qoavXEzRRUfup81LsDQv4fnUQbLyorpPz6WxiwdiU7A=",
version = "v0.0.0-20150527144652-62ff55a1048c",
)
go_repository(
name = "com_github_decred_dcrd_crypto_blake256",
importpath = "github.com/decred/dcrd/crypto/blake256",
sum = "h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_decred_dcrd_dcrec_secp256k1_v4",
importpath = "github.com/decred/dcrd/dcrec/secp256k1/v4",
sum = "h1:1iy2qD6JEhHKKhUOA9IWs7mjco7lnw2qx8FsRI2wirE=",
version = "v4.0.0-20210816181553-5444fa50b93d",
)
go_repository(
name = "com_github_deepmap_oapi_codegen",
importpath = "github.com/deepmap/oapi-codegen",
sum = "h1:qpyRY+dzjMai5QejjA53ebnBtcSvIcZOtYwVlsgdxOc=",
version = "v1.9.0",
)
go_repository(
name = "com_github_dgrijalva_jwt_go",
importpath = "github.com/dgrijalva/jwt-go",
sum = "h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=",
version = "v3.2.0+incompatible",
)
go_repository(
name = "com_github_dgryski_go_sip13",
importpath = "github.com/dgryski/go-sip13",
sum = "h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=",
version = "v0.0.0-20181026042036-e10d5fee7954",
)
go_repository(
name = "com_github_docker_cli",
importpath = "github.com/docker/cli",
sum = "h1:2HQmlpI3yI9deH18Q6xiSOIjXD4sLI55Y/gfpa8/558=",
version = "v0.0.0-20191017083524-a8ff7f821017",
)
go_repository(
name = "com_github_docker_distribution",
importpath = "github.com/docker/distribution",
sum = "h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=",
version = "v2.7.1+incompatible",
)
go_repository(
name = "com_github_docker_docker",
importpath = "github.com/docker/docker",
sum = "h1:Cvj7S8I4Xpx78KAl6TwTmMHuHlZ/0SM60NUneGJQ7IE=",
version = "v1.4.2-0.20190924003213-a8608b5b67c7",
)
go_repository(
name = "com_github_docker_docker_credential_helpers",
importpath = "github.com/docker/docker-credential-helpers",
sum = "h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=",
version = "v0.6.3",
)
go_repository(
name = "com_github_docker_go_connections",
importpath = "github.com/docker/go-connections",
sum = "h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=",
version = "v0.4.0",
)
go_repository(
name = "com_github_docker_go_units",
importpath = "github.com/docker/go-units",
sum = "h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=",
version = "v0.4.0",
)
go_repository(
name = "com_github_docopt_docopt_go",
importpath = "github.com/docopt/docopt-go",
sum = "h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=",
version = "v0.0.0-20180111231733-ee0de3bc6815",
)
go_repository(
name = "com_github_dustin_go_humanize",
importpath = "github.com/dustin/go-humanize",
sum = "h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_dvyukov_go_fuzz",
importpath = "github.com/dvyukov/go-fuzz",
sum = "h1:q1oJaUPdmpDm/VyXosjgPgr6wS7c5iV2p0PwJD73bUI=",
version = "v0.0.0-20210103155950-6a8e9d1f2415",
)
go_repository(
name = "com_github_emicklei_go_restful",
importpath = "github.com/emicklei/go-restful",
sum = "h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=",
version = "v2.9.5+incompatible",
)
go_repository(
name = "com_github_emojisum_emojisum",
importpath = "github.com/emojisum/emojisum",
sum = "h1:3fEA+fY1ujuzNCOgd6Y1E/JndDFIm34GcYkdmwba0bI=",
version = "v0.0.0-20210601164913-cb9db27ebae2",
)
go_repository(
name = "com_github_envoyproxy_go_control_plane",
importpath = "github.com/envoyproxy/go-control-plane",
sum = "h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s=",
version = "v0.9.9-0.20210512163311-63b5d3c536b0",
)
go_repository(
name = "com_github_envoyproxy_protoc_gen_validate",
importpath = "github.com/envoyproxy/protoc-gen-validate",
sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
version = "v0.1.0",
)
go_repository(
name = "com_github_fatih_color",
importpath = "github.com/fatih/color",
sum = "h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=",
version = "v1.9.0",
)
go_repository(
name = "com_github_flynn_go_shlex",
importpath = "github.com/flynn/go-shlex",
sum = "h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=",
version = "v0.0.0-20150515145356-3f9db97f8568",
)
go_repository(
name = "com_github_francoispqt_gojay",
importpath = "github.com/francoispqt/gojay",
sum = "h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=",
version = "v1.2.13",
)
go_repository(
name = "com_github_fsnotify_fsnotify",
importpath = "github.com/fsnotify/fsnotify",
sum = "h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=",
version = "v1.5.1",
)
go_repository(
name = "com_github_getkin_kin_openapi",
importpath = "github.com/getkin/kin-openapi",
sum = "h1:W/s5/DNnDCR8P+pYyafEWlGk4S7/AfQUWXgrRSSAzf8=",
version = "v0.80.0",
)
go_repository(
name = "com_github_ghodss_yaml",
importpath = "github.com/ghodss/yaml",
sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_gin_contrib_sse",
importpath = "github.com/gin-contrib/sse",
sum = "h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=",
version = "v0.1.0",
)
go_repository(
name = "com_github_gin_gonic_gin",
importpath = "github.com/gin-gonic/gin",
sum = "h1:QmUZXrvJ9qZ3GfWvQ+2wnW/1ePrTEJqPKMYEU3lD/DM=",
version = "v1.7.4",
)
go_repository(
name = "com_github_gliderlabs_ssh",
importpath = "github.com/gliderlabs/ssh",
sum = "h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw=",
version = "v0.1.1",
)
go_repository(
name = "com_github_go_chi_chi_v5",
importpath = "github.com/go-chi/chi/v5",
sum = "h1:4xKeALZdMEsuI5s05PU2Bm89Uc5iM04qFubUCl5LfAQ=",
version = "v5.0.2",
)
go_repository(
name = "com_github_go_chi_cors",
importpath = "github.com/go-chi/cors",
sum = "h1:eHuqxsIw89iXcWnWUN8R72JMibABJTN/4IOYI5WERvw=",
version = "v1.1.1",
)
go_repository(
name = "com_github_go_errors_errors",
importpath = "github.com/go-errors/errors",
sum = "h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=",
version = "v1.0.1",
)
go_repository(
name = "com_github_go_gl_glfw",
importpath = "github.com/go-gl/glfw",
sum = "h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=",
version = "v0.0.0-20190409004039-e6da0acd62b1",
)
go_repository(
name = "com_github_go_gl_glfw_v3_3_glfw",
importpath = "github.com/go-gl/glfw/v3.3/glfw",
sum = "h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=",
version = "v0.0.0-20200222043503-6f7a984d4dc4",
)
go_repository(
name = "com_github_go_kit_kit",
importpath = "github.com/go-kit/kit",
sum = "h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=",
version = "v0.9.0",
)
go_repository(
name = "com_github_go_kit_log",
importpath = "github.com/go-kit/log",
sum = "h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=",
version = "v0.1.0",
)
go_repository(
name = "com_github_go_logfmt_logfmt",
importpath = "github.com/go-logfmt/logfmt",
sum = "h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=",
version = "v0.5.0",
)
go_repository(
name = "com_github_go_logr_logr",
importpath = "github.com/go-logr/logr",
sum = "h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=",
version = "v0.2.0",
)
go_repository(
name = "com_github_go_openapi_jsonpointer",
importpath = "github.com/go-openapi/jsonpointer",
sum = "h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=",
version = "v0.19.5",
)
go_repository(
name = "com_github_go_openapi_jsonreference",
importpath = "github.com/go-openapi/jsonreference",
sum = "h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=",
version = "v0.19.3",
)
go_repository(
name = "com_github_go_openapi_spec",
importpath = "github.com/go-openapi/spec",
sum = "h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=",
version = "v0.19.3",
)
go_repository(
name = "com_github_go_openapi_swag",
importpath = "github.com/go-openapi/swag",
sum = "h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=",
version = "v0.19.14",
)
go_repository(
name = "com_github_go_playground_assert_v2",
importpath = "github.com/go-playground/assert/v2",
sum = "h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=",
version = "v2.0.1",
)
go_repository(
name = "com_github_go_playground_locales",
importpath = "github.com/go-playground/locales",
sum = "h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=",
version = "v0.14.0",
)
go_repository(
name = "com_github_go_playground_universal_translator",
importpath = "github.com/go-playground/universal-translator",
sum = "h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=",
version = "v0.18.0",
)
go_repository(
name = "com_github_go_playground_validator_v10",
importpath = "github.com/go-playground/validator/v10",
sum = "h1:NgTtmN58D0m8+UuxtYmGztBJB7VnPgjj221I1QHci2A=",
version = "v10.9.0",
)
go_repository(
name = "com_github_go_stack_stack",
importpath = "github.com/go-stack/stack",
sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=",
version = "v1.8.0",
)
go_repository(
name = "com_github_go_task_slim_sprig",
importpath = "github.com/go-task/slim-sprig",
sum = "h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=",
version = "v0.0.0-20210107165309-348f09dbbbc0",
)
go_repository(
name = "com_github_goccy_go_json",
importpath = "github.com/goccy/go-json",
sum = "h1:CvMH7LotYymYuLGEohBM1lTZWX4g6jzWUUl2aLFuBoE=",
version = "v0.7.8",
)
go_repository(
name = "com_github_godbus_dbus_v5",
importpath = "github.com/godbus/dbus/v5",
sum = "h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=",
version = "v5.0.4",
)
go_repository(
name = "com_github_gogo_protobuf",
importpath = "github.com/gogo/protobuf",
sum = "h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=",
version = "v1.3.2",
)
go_repository(
name = "com_github_golang_glog",
importpath = "github.com/golang/glog",
sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
version = "v0.0.0-20160126235308-23def4e6c14b",
)
go_repository(
name = "com_github_golang_groupcache",
importpath = "github.com/golang/groupcache",
sum = "h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=",
version = "v0.0.0-20200121045136-8c9f03a8e57e",
)
go_repository(
name = "com_github_golang_lint",
importpath = "github.com/golang/lint",
sum = "h1:2hRPrmiwPrp3fQX967rNJIhQPtiGXdlQWAxKbKw3VHA=",
version = "v0.0.0-20180702182130-06c8688daad7",
)
go_repository(
name = "com_github_golang_mock",
importpath = "github.com/golang/mock",
sum = "h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=",
version = "v1.6.0",
)
go_repository(
name = "com_github_golang_protobuf",
importpath = "github.com/golang/protobuf",
sum = "h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=",
version = "v1.5.2",
)
go_repository(
name = "com_github_golang_snappy",
importpath = "github.com/golang/snappy",
sum = "h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=",
version = "v0.0.3",
)
go_repository(
name = "com_github_golangci_lint_1",
importpath = "github.com/golangci/lint-1",
sum = "h1:utua3L2IbQJmauC5IXdEA547bcoU5dozgQAfc8Onsg4=",
version = "v0.0.0-20181222135242-d2cdd8c08219",
)
go_repository(
name = "com_github_google_btree",
importpath = "github.com/google/btree",
sum = "h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_google_go_cmp",
importpath = "github.com/google/go-cmp",
sum = "h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=",
version = "v0.5.6",
)
go_repository(
name = "com_github_google_go_containerregistry",
importpath = "github.com/google/go-containerregistry",
sum = "h1:/+mFTs4AlwsJ/mJe8NDtKb7BxLtbZFpcn8vDsneEkwQ=",
version = "v0.5.1",
)
go_repository(
name = "com_github_google_go_github",
importpath = "github.com/google/go-github",
sum = "h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=",
version = "v17.0.0+incompatible",
)
go_repository(
name = "com_github_google_go_querystring",
importpath = "github.com/google/go-querystring",
sum = "h1:Avad62mreCc9la5buHvHZXbvsY+GPYUVjd8xsi48FYY=",
version = "v1.0.1-0.20190318165438-c8c88dbee036",
)
go_repository(
name = "com_github_google_gofuzz",
importpath = "github.com/google/gofuzz",
sum = "h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=",
version = "v1.1.0",
)
go_repository(
name = "com_github_google_gopacket",
importpath = "github.com/google/gopacket",
sum = "h1:eR3RuANqlK0CQoHJxUdXQNsco+gJykcti01+wqBCuPs=",
version = "v1.1.16-0.20190123011826-102d5ca2098c",
)
go_repository(
name = "com_github_google_martian",
importpath = "github.com/google/martian",
sum = "h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=",
version = "v2.1.0+incompatible",
)
go_repository(
name = "com_github_google_martian_v3",
importpath = "github.com/google/martian/v3",
sum = "h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=",
version = "v3.2.1",
)
go_repository(
name = "com_github_google_pprof",
importpath = "github.com/google/pprof",
sum = "h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=",
version = "v0.0.0-20210720184732-4bb14d4b1be1",
)
go_repository(
name = "com_github_google_renameio",
importpath = "github.com/google/renameio",
sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
version = "v0.1.0",
)
go_repository(
name = "com_github_google_uuid",
importpath = "github.com/google/uuid",
sum = "h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=",
version = "v1.1.2",
)
go_repository(
name = "com_github_googleapis_gax_go",
importpath = "github.com/googleapis/gax-go",
sum = "h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=",
version = "v2.0.0+incompatible",
)
go_repository(
name = "com_github_googleapis_gax_go_v2",
importpath = "github.com/googleapis/gax-go/v2",
sum = "h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso=",
version = "v2.1.0",
)
go_repository(
name = "com_github_googleapis_gnostic",
importpath = "github.com/googleapis/gnostic",
sum = "h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=",
version = "v0.4.1",
)
go_repository(
name = "com_github_gopherjs_gopherjs",
importpath = "github.com/gopherjs/gopherjs",
sum = "h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=",
version = "v0.0.0-20181017120253-0766667cb4d1",
)
go_repository(
name = "com_github_gorilla_mux",
importpath = "github.com/gorilla/mux",
sum = "h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=",
version = "v1.8.0",
)
go_repository(
name = "com_github_gorilla_websocket",
importpath = "github.com/gorilla/websocket",
sum = "h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=",
version = "v1.4.0",
)
go_repository(
name = "com_github_gregjones_httpcache",
importpath = "github.com/gregjones/httpcache",
sum = "h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=",
version = "v0.0.0-20180305231024-9cad4c3443a7",
)
go_repository(
name = "com_github_grpc_ecosystem_go_grpc_middleware",
importpath = "github.com/grpc-ecosystem/go-grpc-middleware",
sum = "h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=",
version = "v1.3.0",
)
go_repository(
name = "com_github_grpc_ecosystem_go_grpc_prometheus",
importpath = "github.com/grpc-ecosystem/go-grpc-prometheus",
sum = "h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=",
version = "v1.2.0",
)
go_repository(
name = "com_github_grpc_ecosystem_grpc_gateway",
importpath = "github.com/grpc-ecosystem/grpc-gateway",
sum = "h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=",
version = "v1.16.0",
)
go_repository(
name = "com_github_grpc_ecosystem_grpc_opentracing",
importpath = "github.com/grpc-ecosystem/grpc-opentracing",
sum = "h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=",
version = "v0.0.0-20180507213350-8e809c8a8645",
)
go_repository(
name = "com_github_hashicorp_consul_api",
importpath = "github.com/hashicorp/consul/api",
sum = "h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw=",
version = "v1.10.1",
)
go_repository(
name = "com_github_hashicorp_consul_sdk",
importpath = "github.com/hashicorp/consul/sdk",
sum = "h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=",
version = "v0.8.0",
)
go_repository(
name = "com_github_hashicorp_errwrap",
importpath = "github.com/hashicorp/errwrap",
sum = "h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_cleanhttp",
importpath = "github.com/hashicorp/go-cleanhttp",
sum = "h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=",
version = "v0.5.1",
)
go_repository(
name = "com_github_hashicorp_go_hclog",
importpath = "github.com/hashicorp/go-hclog",
sum = "h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=",
version = "v0.12.0",
)
go_repository(
name = "com_github_hashicorp_go_immutable_radix",
importpath = "github.com/hashicorp/go-immutable-radix",
sum = "h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_msgpack",
importpath = "github.com/hashicorp/go-msgpack",
sum = "h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=",
version = "v0.5.3",
)
go_repository(
name = "com_github_hashicorp_go_multierror",
importpath = "github.com/hashicorp/go-multierror",
sum = "h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=",
version = "v1.1.0",
)
go_repository(
name = "com_github_hashicorp_go_net",
importpath = "github.com/hashicorp/go.net",
sum = "h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw=",
version = "v0.0.1",
)
go_repository(
name = "com_github_hashicorp_go_rootcerts",
importpath = "github.com/hashicorp/go-rootcerts",
sum = "h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=",
version = "v1.0.2",
)
go_repository(
name = "com_github_hashicorp_go_sockaddr",
importpath = "github.com/hashicorp/go-sockaddr",
sum = "h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_syslog",
importpath = "github.com/hashicorp/go-syslog",
sum = "h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_go_uuid",
importpath = "github.com/hashicorp/go-uuid",
sum = "h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=",
version = "v1.0.1",
)
go_repository(
name = "com_github_hashicorp_golang_lru",
importpath = "github.com/hashicorp/golang-lru",
sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=",
version = "v0.5.1",
)
go_repository(
name = "com_github_hashicorp_hcl",
importpath = "github.com/hashicorp/hcl",
sum = "h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_logutils",
importpath = "github.com/hashicorp/logutils",
sum = "h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=",
version = "v1.0.0",
)
go_repository(
name = "com_github_hashicorp_mdns",
importpath = "github.com/hashicorp/mdns",
sum = "h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8=",
version = "v1.0.1",
)
go_repository(
name = "com_github_hashicorp_memberlist",
importpath = "github.com/hashicorp/memberlist",
sum = "h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g=",
version = "v0.2.2",
)
go_repository(
name = "com_github_hashicorp_serf",
importpath = "github.com/hashicorp/serf",
sum = "h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM=",
version = "v0.9.5",
)
go_repository(
name = "com_github_hpcloud_tail",
importpath = "github.com/hpcloud/tail",
sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=",
version = "v1.0.0",
)
go_repository(
name = "com_github_iancoleman_strcase",
importpath = "github.com/iancoleman/strcase",
sum = "h1:ECW73yc9MY7935nNYXUkK7Dz17YuSUI9yqRqYS8aBww=",
version = "v0.0.0-20190422225806-e506e3ef7365",
)
go_repository(
name = "com_github_ianlancetaylor_demangle",
importpath = "github.com/ianlancetaylor/demangle",
sum = "h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI=",
version = "v0.0.0-20200824232613-28f6c0f3b639",
)
go_repository(
name = "com_github_inconshreveable_mousetrap",
importpath = "github.com/inconshreveable/mousetrap",
sum = "h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_jellevandenhooff_dkim",
importpath = "github.com/jellevandenhooff/dkim",
sum = "h1:ujPKutqRlJtcfWk6toYVYagwra7HQHbXOaS171b4Tg8=",
version = "v0.0.0-20150330215556-f50fe3d243e1",
)
go_repository(
name = "com_github_joefitzgerald_rainbow_reporter",
importpath = "github.com/joefitzgerald/rainbow-reporter",
sum = "h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo=",
version = "v0.1.0",
)
go_repository(
name = "com_github_jonboulle_clockwork",
importpath = "github.com/jonboulle/clockwork",
sum = "h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=",
version = "v0.1.0",
)
go_repository(
name = "com_github_josharian_intern",
importpath = "github.com/josharian/intern",
sum = "h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=",
version = "v1.0.0",
)
go_repository(
name = "com_github_jpillora_backoff",
importpath = "github.com/jpillora/backoff",
sum = "h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=",
version = "v1.0.0",
)
go_repository(
name = "com_github_json_iterator_go",
importpath = "github.com/json-iterator/go",
sum = "h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=",
version = "v1.1.12",
)
go_repository(
name = "com_github_jstemmer_go_junit_report",
importpath = "github.com/jstemmer/go-junit-report",
sum = "h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=",
version = "v0.9.1",
)
go_repository(
name = "com_github_jtolds_gls",
importpath = "github.com/jtolds/gls",
sum = "h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=",
version = "v4.20.0+incompatible",
)
go_repository(
name = "com_github_julienschmidt_httprouter",
importpath = "github.com/julienschmidt/httprouter",
sum = "h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=",
version = "v1.3.0",
)
go_repository(
name = "com_github_kisielk_errcheck",
importpath = "github.com/kisielk/errcheck",
sum = "h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=",
version = "v1.5.0",
)
go_repository(
name = "com_github_kisielk_gotool",
importpath = "github.com/kisielk/gotool",
sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_konsorten_go_windows_terminal_sequences",
importpath = "github.com/konsorten/go-windows-terminal-sequences",
sum = "h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=",
version = "v1.0.3",
)
go_repository(
name = "com_github_kr_fs",
importpath = "github.com/kr/fs",
sum = "h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=",
version = "v0.1.0",
)
go_repository(
name = "com_github_kr_logfmt",
importpath = "github.com/kr/logfmt",
sum = "h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=",
version = "v0.0.0-20140226030751-b84e30acd515",
)
go_repository(
name = "com_github_kr_pretty",
importpath = "github.com/kr/pretty",
sum = "h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=",
version = "v0.3.0",
)
go_repository(
name = "com_github_kr_pty",
importpath = "github.com/kr/pty",
sum = "h1:hyz3dwM5QLc1Rfoz4FuWJQG5BN7tc6K1MndAUnGpQr4=",
version = "v1.1.5",
)
go_repository(
name = "com_github_kr_text",
importpath = "github.com/kr/text",
sum = "h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=",
version = "v0.2.0",
)
go_repository(
name = "com_github_labstack_echo_v4",
importpath = "github.com/labstack/echo/v4",
sum = "h1:LF5Iq7t/jrtUuSutNuiEWtB5eiHfZ5gSe2pcu5exjQw=",
version = "v4.2.1",
)
go_repository(
name = "com_github_labstack_gommon",
importpath = "github.com/labstack/gommon",
sum = "h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=",
version = "v0.3.0",
)
go_repository(
name = "com_github_leodido_go_urn",
importpath = "github.com/leodido/go-urn",
sum = "h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=",
version = "v1.2.1",
)
go_repository(
name = "com_github_lestrrat_go_backoff_v2",
importpath = "github.com/lestrrat-go/backoff/v2",
sum = "h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A=",
version = "v2.0.8",
)
go_repository(
name = "com_github_lestrrat_go_blackmagic",
importpath = "github.com/lestrrat-go/blackmagic",
sum = "h1:XzdxDbuQTz0RZZEmdU7cnQxUtFUzgCSPq8RCz4BxIi4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_lestrrat_go_codegen",
importpath = "github.com/lestrrat-go/codegen",
sum = "h1:HWA03icqONV45riBbrVx+VOjL6esg4LUAuYlvpl/ZLU=",
version = "v1.0.2",
)
go_repository(
name = "com_github_lestrrat_go_httpcc",
importpath = "github.com/lestrrat-go/httpcc",
sum = "h1:FszVC6cKfDvBKcJv646+lkh4GydQg2Z29scgUfkOpYc=",
version = "v1.0.0",
)
go_repository(
name = "com_github_lestrrat_go_iter",
importpath = "github.com/lestrrat-go/iter",
sum = "h1:q8faalr2dY6o8bV45uwrxq12bRa1ezKrB6oM9FUgN4A=",
version = "v1.0.1",
)
go_repository(
name = "com_github_lestrrat_go_jwx",
importpath = "github.com/lestrrat-go/jwx",
sum = "h1:wO7fEc3PW56wpQBMU5CyRkrk4DVsXxCoJg7oIm5HHE4=",
version = "v1.2.7",
)
go_repository(
name = "com_github_lestrrat_go_option",
importpath = "github.com/lestrrat-go/option",
sum = "h1:WqAWL8kh8VcSoD6xjSH34/1m8yxluXQbDeKNfvFeEO4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_lucas_clemente_quic_go",
importpath = "github.com/lucas-clemente/quic-go",
sum = "h1:5vFnKtZ6nHDFsc/F3uuiF4T3y/AXaQdxjUqiVw26GZE=",
version = "v0.23.0",
)
go_repository(
name = "com_github_lunixbochs_vtclean",
importpath = "github.com/lunixbochs/vtclean",
sum = "h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8=",
version = "v1.0.0",
)
go_repository(
name = "com_github_magiconair_properties",
importpath = "github.com/magiconair/properties",
sum = "h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=",
version = "v1.8.5",
)
go_repository(
name = "com_github_mailru_easyjson",
importpath = "github.com/mailru/easyjson",
sum = "h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=",
version = "v0.7.7",
)
go_repository(
name = "com_github_marten_seemann_qpack",
importpath = "github.com/marten-seemann/qpack",
sum = "h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs=",
version = "v0.2.1",
)
go_repository(
name = "com_github_marten_seemann_qtls_go1_15",
importpath = "github.com/marten-seemann/qtls-go1-15",
sum = "h1:RehYMOyRW8hPVEja1KBVsFVNSm35Jj9Mvs5yNoZZ28A=",
version = "v0.1.4",
)
go_repository(
name = "com_github_marten_seemann_qtls_go1_16",
importpath = "github.com/marten-seemann/qtls-go1-16",
sum = "h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco=",
version = "v0.1.4",
)
go_repository(
name = "com_github_marten_seemann_qtls_go1_17",
importpath = "github.com/marten-seemann/qtls-go1-17",
sum = "h1:P9ggrs5xtwiqXv/FHNwntmuLMNq3KaSIG93AtAZ48xk=",
version = "v0.1.0",
)
go_repository(
name = "com_github_matryer_moq",
importpath = "github.com/matryer/moq",
sum = "h1:HvFwW+cm9bCbZ/+vuGNq7CRWXql8c0y8nGeYpqmpvmk=",
version = "v0.0.0-20190312154309-6cfb0558e1bd",
)
go_repository(
name = "com_github_mattn_go_colorable",
importpath = "github.com/mattn/go-colorable",
sum = "h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=",
version = "v0.1.8",
)
go_repository(
name = "com_github_mattn_go_isatty",
importpath = "github.com/mattn/go-isatty",
sum = "h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=",
version = "v0.0.14",
)
go_repository(
name = "com_github_mattn_go_sqlite3",
importpath = "github.com/mattn/go-sqlite3",
sum = "h1:4rQjbDxdu9fSgI/r3KN72G3c2goxknAqHHgPWWs8UlI=",
version = "v1.14.4",
)
go_repository(
name = "com_github_matttproud_golang_protobuf_extensions",
importpath = "github.com/matttproud/golang_protobuf_extensions",
sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=",
version = "v1.0.1",
)
go_repository(
name = "com_github_maxbrunsfeld_counterfeiter_v6",
importpath = "github.com/maxbrunsfeld/counterfeiter/v6",
sum = "h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE=",
version = "v6.2.2",
)
go_repository(
name = "com_github_mdlayher_raw",
importpath = "github.com/mdlayher/raw",
sum = "h1:aFkJ6lx4FPip+S+Uw4aTegFMct9shDvP+79PsSxpm3w=",
version = "v0.0.0-20191009151244-50f2db8cc065",
)
go_repository(
name = "com_github_microcosm_cc_bluemonday",
importpath = "github.com/microcosm-cc/bluemonday",
sum = "h1:SIYunPjnlXcW+gVfvm0IlSeR5U3WZUOLfVmqg85Go44=",
version = "v1.0.1",
)
go_repository(
name = "com_github_microsoft_go_winio",
importpath = "github.com/Microsoft/go-winio",
sum = "h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=",
version = "v0.4.14",
)
go_repository(
name = "com_github_miekg_dns",
importpath = "github.com/miekg/dns",
sum = "h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU=",
version = "v1.1.26",
)
go_repository(
name = "com_github_mitchellh_cli",
importpath = "github.com/mitchellh/cli",
sum = "h1:tEElEatulEHDeedTxwckzyYMA5c86fbmNIUL1hBIiTg=",
version = "v1.1.0",
)
go_repository(
name = "com_github_mitchellh_go_homedir",
importpath = "github.com/mitchellh/go-homedir",
sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=",
version = "v1.1.0",
)
go_repository(
name = "com_github_mitchellh_go_testing_interface",
importpath = "github.com/mitchellh/go-testing-interface",
sum = "h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_gox",
importpath = "github.com/mitchellh/gox",
sum = "h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc=",
version = "v0.4.0",
)
go_repository(
name = "com_github_mitchellh_iochan",
importpath = "github.com/mitchellh/iochan",
sum = "h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_mapstructure",
importpath = "github.com/mitchellh/mapstructure",
sum = "h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=",
version = "v1.4.2",
)
go_repository(
name = "com_github_modern_go_concurrent",
importpath = "github.com/modern-go/concurrent",
sum = "h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=",
version = "v0.0.0-20180306012644-bacd9c7ef1dd",
)
go_repository(
name = "com_github_modern_go_reflect2",
importpath = "github.com/modern-go/reflect2",
sum = "h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=",
version = "v1.0.2",
)
go_repository(
name = "com_github_morikuni_aec",
importpath = "github.com/morikuni/aec",
sum = "h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=",
version = "v1.0.0",
)
go_repository(
name = "com_github_munnerz_goautoneg",
importpath = "github.com/munnerz/goautoneg",
sum = "h1:7PxY7LVfSZm7PEeBTyK1rj1gABdCO2mbri6GKO1cMDs=",
version = "v0.0.0-20120707110453-a547fc61f48d",
)
go_repository(
name = "com_github_mwitkow_go_conntrack",
importpath = "github.com/mwitkow/go-conntrack",
sum = "h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=",
version = "v0.0.0-20190716064945-2f068394615f",
)
go_repository(
name = "com_github_neelance_astrewrite",
importpath = "github.com/neelance/astrewrite",
sum = "h1:D6paGObi5Wud7xg83MaEFyjxQB1W5bz5d0IFppr+ymk=",
version = "v0.0.0-20160511093645-99348263ae86",
)
go_repository(
name = "com_github_neelance_sourcemap",
importpath = "github.com/neelance/sourcemap",
sum = "h1:eFXv9Nu1lGbrNbj619aWwZfVF5HBrm9Plte8aNptuTI=",
version = "v0.0.0-20151028013722-8c68805598ab",
)
go_repository(
name = "com_github_niemeyer_pretty",
importpath = "github.com/niemeyer/pretty",
sum = "h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=",
version = "v0.0.0-20200227124842-a10e7caefd8e",
)
go_repository(
name = "com_github_nxadm_tail",
importpath = "github.com/nxadm/tail",
sum = "h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=",
version = "v1.4.8",
)
go_repository(
name = "com_github_nytimes_gziphandler",
importpath = "github.com/NYTimes/gziphandler",
sum = "h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=",
version = "v0.0.0-20170623195520-56545f4a5d46",
)
go_repository(
name = "com_github_oklog_ulid",
importpath = "github.com/oklog/ulid",
sum = "h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=",
version = "v1.3.1",
)
go_repository(
name = "com_github_oneofone_xxhash",
importpath = "github.com/OneOfOne/xxhash",
sum = "h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=",
version = "v1.2.2",
)
go_repository(
name = "com_github_onsi_ginkgo",
importpath = "github.com/onsi/ginkgo",
sum = "h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=",
version = "v1.16.4",
)
go_repository(
name = "com_github_onsi_gomega",
importpath = "github.com/onsi/gomega",
sum = "h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=",
version = "v1.13.0",
)
go_repository(
name = "com_github_opencontainers_go_digest",
importpath = "github.com/opencontainers/go-digest",
sum = "h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=",
version = "v1.0.0",
)
go_repository(
name = "com_github_opencontainers_image_spec",
importpath = "github.com/opencontainers/image-spec",
sum = "h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=",
version = "v1.0.1",
)
go_repository(
name = "com_github_opentracing_opentracing_go",
importpath = "github.com/opentracing/opentracing-go",
sum = "h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=",
version = "v1.2.0",
)
go_repository(
name = "com_github_openzipkin_zipkin_go",
importpath = "github.com/openzipkin/zipkin-go",
sum = "h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw=",
version = "v0.1.1",
)
go_repository(
name = "com_github_pascaldekloe_goe",
importpath = "github.com/pascaldekloe/goe",
sum = "h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=",
version = "v0.0.0-20180627143212-57f6aae5913c",
)
go_repository(
name = "com_github_patrickmn_go_cache",
importpath = "github.com/patrickmn/go-cache",
sum = "h1:MUIwjEiAMYk8zkXXUQeb5itrXF+HpS2pfxNsA2a7AiY=",
version = "v2.1.1-0.20180815053127-5633e0862627+incompatible",
)
go_repository(
name = "com_github_pelletier_go_toml",
importpath = "github.com/pelletier/go-toml",
sum = "h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=",
version = "v1.9.4",
)
go_repository(
name = "com_github_pkg_diff",
importpath = "github.com/pkg/diff",
sum = "h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=",
version = "v0.0.0-20210226163009-20ebb0f2a09e",
)
go_repository(
name = "com_github_pkg_errors",
importpath = "github.com/pkg/errors",
sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=",
version = "v0.9.1",
)
go_repository(
name = "com_github_pkg_sftp",
importpath = "github.com/pkg/sftp",
sum = "h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=",
version = "v1.10.1",
)
go_repository(
name = "com_github_pmezard_go_difflib",
importpath = "github.com/pmezard/go-difflib",
sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_posener_complete",
importpath = "github.com/posener/complete",
sum = "h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=",
version = "v1.2.3",
)
go_repository(
name = "com_github_prometheus_client_golang",
importpath = "github.com/prometheus/client_golang",
sum = "h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=",
version = "v1.11.0",
)
go_repository(
name = "com_github_prometheus_client_model",
importpath = "github.com/prometheus/client_model",
sum = "h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=",
version = "v0.2.0",
)
go_repository(
name = "com_github_prometheus_common",
importpath = "github.com/prometheus/common",
sum = "h1:HRmM4uANZDAjdvbsdfOoqI5UDbjz0faKeMs/cGPKKI0=",
version = "v0.32.0",
)
go_repository(
name = "com_github_prometheus_procfs",
importpath = "github.com/prometheus/procfs",
sum = "h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=",
version = "v0.6.0",
)
go_repository(
name = "com_github_prometheus_tsdb",
importpath = "github.com/prometheus/tsdb",
sum = "h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=",
version = "v0.7.1",
)
go_repository(
name = "com_github_puerkitobio_purell",
importpath = "github.com/PuerkitoBio/purell",
sum = "h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=",
version = "v1.1.1",
)
go_repository(
name = "com_github_puerkitobio_urlesc",
importpath = "github.com/PuerkitoBio/urlesc",
sum = "h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=",
version = "v0.0.0-20170810143723-de5bf2ad4578",
)
go_repository(
name = "com_github_rogpeppe_fastuuid",
importpath = "github.com/rogpeppe/fastuuid",
sum = "h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=",
version = "v1.2.0",
)
go_repository(
name = "com_github_rogpeppe_go_internal",
importpath = "github.com/rogpeppe/go-internal",
sum = "h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=",
version = "v1.8.0",
)
go_repository(
name = "com_github_russross_blackfriday",
importpath = "github.com/russross/blackfriday",
sum = "h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=",
version = "v1.5.2",
)
go_repository(
name = "com_github_russross_blackfriday_v2",
importpath = "github.com/russross/blackfriday/v2",
sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=",
version = "v2.0.1",
)
go_repository(
name = "com_github_ryanuber_columnize",
importpath = "github.com/ryanuber/columnize",
sum = "h1:UFr9zpz4xgTnIE5yIMtWAMngCdZ9p/+q6lTbgelo80M=",
version = "v0.0.0-20160712163229-9b3edd62028f",
)
go_repository(
name = "com_github_sagikazarmark_crypt",
importpath = "github.com/sagikazarmark/crypt",
sum = "h1:AyO7PGna28P9TMH93Bsxd7m9QC4xE6zyGQTXCo7ZrA8=",
version = "v0.1.0",
)
go_repository(
name = "com_github_sclevine_spec",
importpath = "github.com/sclevine/spec",
sum = "h1:1Jwdf9jSfDl9NVmt8ndHqbTZ7XCCPbh1jI3hkDBHVYA=",
version = "v1.2.0",
)
go_repository(
name = "com_github_sean_seed",
importpath = "github.com/sean-/seed",
sum = "h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=",
version = "v0.0.0-20170313163322-e2103e2c3529",
)
go_repository(
name = "com_github_sergi_go_diff",
importpath = "github.com/sergi/go-diff",
sum = "h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=",
version = "v1.2.0",
)
go_repository(
name = "com_github_shurcool_component",
importpath = "github.com/shurcooL/component",
sum = "h1:Fth6mevc5rX7glNLpbAMJnqKlfIkcTjZCSHEeqvKbcI=",
version = "v0.0.0-20170202220835-f88ec8f54cc4",
)
go_repository(
name = "com_github_shurcool_events",
importpath = "github.com/shurcooL/events",
sum = "h1:vabduItPAIz9px5iryD5peyx7O3Ya8TBThapgXim98o=",
version = "v0.0.0-20181021180414-410e4ca65f48",
)
go_repository(
name = "com_github_shurcool_github_flavored_markdown",
importpath = "github.com/shurcooL/github_flavored_markdown",
sum = "h1:qb9IthCFBmROJ6YBS31BEMeSYjOscSiG+EO+JVNTz64=",
version = "v0.0.0-20181002035957-2122de532470",
)
go_repository(
name = "com_github_shurcool_go",
importpath = "github.com/shurcooL/go",
sum = "h1:MZM7FHLqUHYI0Y/mQAt3d2aYa0SiNms/hFqC9qJYolM=",
version = "v0.0.0-20180423040247-9e1955d9fb6e",
)
go_repository(
name = "com_github_shurcool_go_goon",
importpath = "github.com/shurcooL/go-goon",
sum = "h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc=",
version = "v0.0.0-20170922171312-37c2f522c041",
)
go_repository(
name = "com_github_shurcool_gofontwoff",
importpath = "github.com/shurcooL/gofontwoff",
sum = "h1:Yoy/IzG4lULT6qZg62sVC+qyBL8DQkmD2zv6i7OImrc=",
version = "v0.0.0-20180329035133-29b52fc0a18d",
)
go_repository(
name = "com_github_shurcool_gopherjslib",
importpath = "github.com/shurcooL/gopherjslib",
sum = "h1:UOk+nlt1BJtTcH15CT7iNO7YVWTfTv/DNwEAQHLIaDQ=",
version = "v0.0.0-20160914041154-feb6d3990c2c",
)
go_repository(
name = "com_github_shurcool_highlight_diff",
importpath = "github.com/shurcooL/highlight_diff",
sum = "h1:vYEG87HxbU6dXj5npkeulCS96Dtz5xg3jcfCgpcvbIw=",
version = "v0.0.0-20170515013008-09bb4053de1b",
)
go_repository(
name = "com_github_shurcool_highlight_go",
importpath = "github.com/shurcooL/highlight_go",
sum = "h1:7pDq9pAMCQgRohFmd25X8hIH8VxmT3TaDm+r9LHxgBk=",
version = "v0.0.0-20181028180052-98c3abbbae20",
)
go_repository(
name = "com_github_shurcool_home",
importpath = "github.com/shurcooL/home",
sum = "h1:MPblCbqA5+z6XARjScMfz1TqtJC7TuTRj0U9VqIBs6k=",
version = "v0.0.0-20181020052607-80b7ffcb30f9",
)
go_repository(
name = "com_github_shurcool_htmlg",
importpath = "github.com/shurcooL/htmlg",
sum = "h1:crYRwvwjdVh1biHzzciFHe8DrZcYrVcZFlJtykhRctg=",
version = "v0.0.0-20170918183704-d01228ac9e50",
)
go_repository(
name = "com_github_shurcool_httperror",
importpath = "github.com/shurcooL/httperror",
sum = "h1:eHRtZoIi6n9Wo1uR+RU44C247msLWwyA89hVKwRLkMk=",
version = "v0.0.0-20170206035902-86b7830d14cc",
)
go_repository(
name = "com_github_shurcool_httpfs",
importpath = "github.com/shurcooL/httpfs",
sum = "h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo=",
version = "v0.0.0-20171119174359-809beceb2371",
)
go_repository(
name = "com_github_shurcool_httpgzip",
importpath = "github.com/shurcooL/httpgzip",
sum = "h1:fxoFD0in0/CBzXoyNhMTjvBZYW6ilSnTw7N7y/8vkmM=",
version = "v0.0.0-20180522190206-b1c53ac65af9",
)
go_repository(
name = "com_github_shurcool_issues",
importpath = "github.com/shurcooL/issues",
sum = "h1:T4wuULTrzCKMFlg3HmKHgXAF8oStFb/+lOIupLV2v+o=",
version = "v0.0.0-20181008053335-6292fdc1e191",
)
go_repository(
name = "com_github_shurcool_issuesapp",
importpath = "github.com/shurcooL/issuesapp",
sum = "h1:Y+TeIabU8sJD10Qwd/zMty2/LEaT9GNDaA6nyZf+jgo=",
version = "v0.0.0-20180602232740-048589ce2241",
)
go_repository(
name = "com_github_shurcool_notifications",
importpath = "github.com/shurcooL/notifications",
sum = "h1:TQVQrsyNaimGwF7bIhzoVC9QkKm4KsWd8cECGzFx8gI=",
version = "v0.0.0-20181007000457-627ab5aea122",
)
go_repository(
name = "com_github_shurcool_octicon",
importpath = "github.com/shurcooL/octicon",
sum = "h1:bu666BQci+y4S0tVRVjsHUeRon6vUXmsGBwdowgMrg4=",
version = "v0.0.0-20181028054416-fa4f57f9efb2",
)
go_repository(
name = "com_github_shurcool_reactions",
importpath = "github.com/shurcooL/reactions",
sum = "h1:LneqU9PHDsg/AkPDU3AkqMxnMYL+imaqkpflHu73us8=",
version = "v0.0.0-20181006231557-f2e0b4ca5b82",
)
go_repository(
name = "com_github_shurcool_sanitized_anchor_name",
importpath = "github.com/shurcooL/sanitized_anchor_name",
sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_shurcool_users",
importpath = "github.com/shurcooL/users",
sum = "h1:YGaxtkYjb8mnTvtufv2LKLwCQu2/C7qFB7UtrOlTWOY=",
version = "v0.0.0-20180125191416-49c67e49c537",
)
go_repository(
name = "com_github_shurcool_webdavfs",
importpath = "github.com/shurcooL/webdavfs",
sum = "h1:JtcyT0rk/9PKOdnKQzuDR+FSjh7SGtJwpgVpfZBRKlQ=",
version = "v0.0.0-20170829043945-18c3829fa133",
)
go_repository(
name = "com_github_sirupsen_logrus",
importpath = "github.com/sirupsen/logrus",
sum = "h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=",
version = "v1.6.0",
)
go_repository(
name = "com_github_smartystreets_assertions",
importpath = "github.com/smartystreets/assertions",
sum = "h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=",
version = "v1.0.0",
)
go_repository(
name = "com_github_smartystreets_goconvey",
importpath = "github.com/smartystreets/goconvey",
sum = "h1:I6tZjLXD2Q1kjvNbIzB1wvQBsXmKXiVrhpRE8ZjP5jY=",
version = "v1.6.7",
)
go_repository(
name = "com_github_soheilhy_cmux",
importpath = "github.com/soheilhy/cmux",
sum = "h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=",
version = "v0.1.4",
)
go_repository(
name = "com_github_songgao_water",
importpath = "github.com/songgao/water",
sum = "h1:+y4hCMc/WKsDbAPsOQZgBSaSZ26uh2afyaWeVg/3s/c=",
version = "v0.0.0-20190725173103-fd331bda3f4b",
)
go_repository(
name = "com_github_sourcegraph_annotate",
importpath = "github.com/sourcegraph/annotate",
sum = "h1:yKm7XZV6j9Ev6lojP2XaIshpT4ymkqhMeSghO5Ps00E=",
version = "v0.0.0-20160123013949-f4cad6c6324d",
)
go_repository(
name = "com_github_sourcegraph_syntaxhighlight",
importpath = "github.com/sourcegraph/syntaxhighlight",
sum = "h1:qpG93cPwA5f7s/ZPBJnGOYQNK/vKsaDaseuKT5Asee8=",
version = "v0.0.0-20170531221838-bd320f5d308e",
)
go_repository(
name = "com_github_spaolacci_murmur3",
importpath = "github.com/spaolacci/murmur3",
sum = "h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=",
version = "v0.0.0-20180118202830-f09979ecbc72",
)
go_repository(
name = "com_github_spf13_afero",
importpath = "github.com/spf13/afero",
sum = "h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=",
version = "v1.6.0",
)
go_repository(
name = "com_github_spf13_cast",
importpath = "github.com/spf13/cast",
sum = "h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=",
version = "v1.4.1",
)
go_repository(
name = "com_github_spf13_cobra",
importpath = "github.com/spf13/cobra",
sum = "h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=",
version = "v1.2.1",
)
go_repository(
name = "com_github_spf13_jwalterweatherman",
importpath = "github.com/spf13/jwalterweatherman",
sum = "h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=",
version = "v1.1.0",
)
go_repository(
name = "com_github_spf13_pflag",
importpath = "github.com/spf13/pflag",
sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=",
version = "v1.0.5",
)
go_repository(
name = "com_github_spf13_viper",
importpath = "github.com/spf13/viper",
sum = "h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk=",
version = "v1.9.0",
)
go_repository(
name = "com_github_stretchr_objx",
importpath = "github.com/stretchr/objx",
sum = "h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=",
version = "v0.2.0",
)
go_repository(
name = "com_github_stretchr_testify",
importpath = "github.com/stretchr/testify",
sum = "h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=",
version = "v1.7.0",
)
go_repository(
name = "com_github_subosito_gotenv",
importpath = "github.com/subosito/gotenv",
sum = "h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=",
version = "v1.2.0",
)
go_repository(
name = "com_github_tarm_serial",
importpath = "github.com/tarm/serial",
sum = "h1:UyzmZLoiDWMRywV4DUYb9Fbt8uiOSooupjTq10vpvnU=",
version = "v0.0.0-20180830185346-98f6abe2eb07",
)
go_repository(
name = "com_github_tmc_grpc_websocket_proxy",
importpath = "github.com/tmc/grpc-websocket-proxy",
sum = "h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=",
version = "v0.0.0-20190109142713-0ad062ec5ee5",
)
go_repository(
name = "com_github_uber_jaeger_client_go",
importpath = "github.com/uber/jaeger-client-go",
sum = "h1:R9ec3zO3sGpzs0abd43Y+fBZRJ9uiH6lXyR/+u6brW4=",
version = "v2.29.1+incompatible",
)
go_repository(
name = "com_github_uber_jaeger_lib",
importpath = "github.com/uber/jaeger-lib",
sum = "h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw=",
version = "v2.0.0+incompatible",
)
go_repository(
name = "com_github_ugorji_go",
importpath = "github.com/ugorji/go",
sum = "h1:tGiWC9HENWE2tqYycIqFTNorMmFRVhNwCpDOpWqnk8E=",
version = "v1.2.6",
)
go_repository(
name = "com_github_ugorji_go_codec",
importpath = "github.com/ugorji/go/codec",
sum = "h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ=",
version = "v1.2.6",
)
go_repository(
name = "com_github_valyala_bytebufferpool",
importpath = "github.com/valyala/bytebufferpool",
sum = "h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=",
version = "v1.0.0",
)
go_repository(
name = "com_github_valyala_fasttemplate",
importpath = "github.com/valyala/fasttemplate",
sum = "h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=",
version = "v1.2.1",
)
go_repository(
name = "com_github_viant_assertly",
importpath = "github.com/viant/assertly",
sum = "h1:5x1GzBaRteIwTr5RAGFVG14uNeRFxVNbXPWrK2qAgpc=",
version = "v0.4.8",
)
go_repository(
name = "com_github_viant_toolbox",
importpath = "github.com/viant/toolbox",
sum = "h1:6TteTDQ68CjgcCe8wH3D3ZhUQQOJXMTbj/D9rkk2a1k=",
version = "v0.24.0",
)
go_repository(
name = "com_github_vishvananda_netlink",
importpath = "github.com/vishvananda/netlink",
sum = "h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA=",
version = "v1.1.1-0.20201029203352-d40f9887b852",
)
go_repository(
name = "com_github_vishvananda_netns",
importpath = "github.com/vishvananda/netns",
sum = "h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=",
version = "v0.0.0-20200728191858-db3c7e526aae",
)
go_repository(
name = "com_github_xeipuuv_gojsonpointer",
importpath = "github.com/xeipuuv/gojsonpointer",
sum = "h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=",
version = "v0.0.0-20180127040702-4e3ac2762d5f",
)
go_repository(
name = "com_github_xeipuuv_gojsonreference",
importpath = "github.com/xeipuuv/gojsonreference",
sum = "h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=",
version = "v0.0.0-20180127040603-bd5ef7bd5415",
)
go_repository(
name = "com_github_xeipuuv_gojsonschema",
importpath = "github.com/xeipuuv/gojsonschema",
sum = "h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=",
version = "v1.2.0",
)
go_repository(
name = "com_github_xiang90_probing",
importpath = "github.com/xiang90/probing",
sum = "h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=",
version = "v0.0.0-20190116061207-43a291ad63a2",
)
go_repository(
name = "com_github_xordataexchange_crypt",
importpath = "github.com/xordataexchange/crypt",
sum = "h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=",
version = "v0.0.3-0.20170626215501-b2862e3d0a77",
)
go_repository(
name = "com_github_yuin_goldmark",
importpath = "github.com/yuin/goldmark",
sum = "h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs=",
version = "v1.3.5",
)
go_repository(
name = "com_google_cloud_go",
importpath = "cloud.google.com/go",
sum = "h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio=",
version = "v0.93.3",
)
go_repository(
name = "com_google_cloud_go_bigquery",
importpath = "cloud.google.com/go/bigquery",
sum = "h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=",
version = "v1.8.0",
)
go_repository(
name = "com_google_cloud_go_datastore",
importpath = "cloud.google.com/go/datastore",
sum = "h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=",
version = "v1.1.0",
)
go_repository(
name = "com_google_cloud_go_firestore",
importpath = "cloud.google.com/go/firestore",
sum = "h1:dMIWvm+3O0E3DM7kcZPH0FBQ94Xg/OMkdTNDaY9itbI=",
version = "v1.6.0",
)
go_repository(
name = "com_google_cloud_go_pubsub",
importpath = "cloud.google.com/go/pubsub",
sum = "h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=",
version = "v1.3.1",
)
go_repository(
name = "com_google_cloud_go_storage",
importpath = "cloud.google.com/go/storage",
sum = "h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=",
version = "v1.10.0",
)
go_repository(
name = "com_shuralyov_dmitri_app_changes",
importpath = "dmitri.shuralyov.com/app/changes",
sum = "h1:hJiie5Bf3QucGRa4ymsAUOxyhYwGEz1xrsVk0P8erlw=",
version = "v0.0.0-20180602232624-0a106ad413e3",
)
go_repository(
name = "com_shuralyov_dmitri_gpu_mtl",
importpath = "dmitri.shuralyov.com/gpu/mtl",
sum = "h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=",
version = "v0.0.0-20190408044501-666a987793e9",
)
go_repository(
name = "com_shuralyov_dmitri_html_belt",
importpath = "dmitri.shuralyov.com/html/belt",
sum = "h1:SPOUaucgtVls75mg+X7CXigS71EnsfVUK/2CgVrwqgw=",
version = "v0.0.0-20180602232347-f7d459c86be0",
)
go_repository(
name = "com_shuralyov_dmitri_service_change",
importpath = "dmitri.shuralyov.com/service/change",
sum = "h1:GvWw74lx5noHocd+f6HBMXK6DuggBB1dhVkuGZbv7qM=",
version = "v0.0.0-20181023043359-a85b471d5412",
)
go_repository(
name = "com_shuralyov_dmitri_state",
importpath = "dmitri.shuralyov.com/state",
sum = "h1:ivON6cwHK1OH26MZyWDCnbTRZZf0IhNsENoNAKFS1g4=",
version = "v0.0.0-20180228185332-28bcc343414c",
)
go_repository(
name = "com_sourcegraph_sourcegraph_go_diff",
importpath = "sourcegraph.com/sourcegraph/go-diff",
sum = "h1:eTiIR0CoWjGzJcnQ3OkhIl/b9GJovq4lSAVRt0ZFEG8=",
version = "v0.5.0",
)
go_repository(
name = "com_sourcegraph_sqs_pbtypes",
importpath = "sourcegraph.com/sqs/pbtypes",
sum = "h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c=",
version = "v0.0.0-20180604144634-d3ebe8f20ae4",
)
go_repository(
name = "in_gopkg_alecthomas_kingpin_v2",
importpath = "gopkg.in/alecthomas/kingpin.v2",
sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=",
version = "v2.2.6",
)
go_repository(
name = "in_gopkg_check_v1",
importpath = "gopkg.in/check.v1",
sum = "h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=",
version = "v1.0.0-20201130134442-10cb98267c6c",
)
go_repository(
name = "in_gopkg_errgo_v2",
importpath = "gopkg.in/errgo.v2",
sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=",
version = "v2.1.0",
)
go_repository(
name = "in_gopkg_fsnotify_v1",
importpath = "gopkg.in/fsnotify.v1",
sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=",
version = "v1.4.7",
)
go_repository(
name = "in_gopkg_inf_v0",
importpath = "gopkg.in/inf.v0",
sum = "h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=",
version = "v0.9.1",
)
go_repository(
name = "in_gopkg_ini_v1",
importpath = "gopkg.in/ini.v1",
sum = "h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c=",
version = "v1.63.2",
)
go_repository(
name = "in_gopkg_resty_v1",
importpath = "gopkg.in/resty.v1",
sum = "h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=",
version = "v1.12.0",
)
go_repository(
name = "in_gopkg_tomb_v1",
importpath = "gopkg.in/tomb.v1",
sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=",
version = "v1.0.0-20141024135613-dd632973f1e7",
)
go_repository(
name = "in_gopkg_yaml_v2",
importpath = "gopkg.in/yaml.v2",
sum = "h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=",
version = "v2.4.0",
)
go_repository(
name = "in_gopkg_yaml_v3",
importpath = "gopkg.in/yaml.v3",
sum = "h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=",
version = "v3.0.0-20210107192922-496545a6307b",
)
go_repository(
name = "io_etcd_go_bbolt",
importpath = "go.etcd.io/bbolt",
sum = "h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=",
version = "v1.3.2",
)
go_repository(
name = "io_etcd_go_etcd_api_v3",
importpath = "go.etcd.io/etcd/api/v3",
sum = "h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw=",
version = "v3.5.0",
)
go_repository(
name = "io_etcd_go_etcd_client_pkg_v3",
importpath = "go.etcd.io/etcd/client/pkg/v3",
sum = "h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU=",
version = "v3.5.0",
)
go_repository(
name = "io_etcd_go_etcd_client_v2",
importpath = "go.etcd.io/etcd/client/v2",
sum = "h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs=",
version = "v2.305.0",
)
go_repository(
name = "io_k8s_code_generator",
importpath = "k8s.io/code-generator",
sum = "h1:kM/68Y26Z/u//TFc1ggVVcg62te8A2yQh57jBfD0FWQ=",
version = "v0.19.7",
)
go_repository(
name = "io_k8s_gengo",
importpath = "k8s.io/gengo",
sum = "h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE=",
version = "v0.0.0-20201113003025-83324d819ded",
)
go_repository(
name = "io_k8s_klog_v2",
importpath = "k8s.io/klog/v2",
sum = "h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=",
version = "v2.4.0",
)
go_repository(
name = "io_k8s_kube_openapi",
importpath = "k8s.io/kube-openapi",
sum = "h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=",
version = "v0.0.0-20200805222855-6aeccd4b50c6",
)
go_repository(
name = "io_k8s_sigs_structured_merge_diff_v4",
importpath = "sigs.k8s.io/structured-merge-diff/v4",
sum = "h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=",
version = "v4.0.1",
)
go_repository(
name = "io_k8s_sigs_yaml",
importpath = "sigs.k8s.io/yaml",
sum = "h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=",
version = "v1.2.0",
)
go_repository(
name = "io_opencensus_go",
importpath = "go.opencensus.io",
sum = "h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=",
version = "v0.23.0",
)
go_repository(
name = "io_opentelemetry_go_proto_otlp",
importpath = "go.opentelemetry.io/proto/otlp",
sum = "h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=",
version = "v0.7.0",
)
go_repository(
name = "io_rsc_binaryregexp",
importpath = "rsc.io/binaryregexp",
sum = "h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=",
version = "v0.2.0",
)
go_repository(
name = "io_rsc_quote_v3",
importpath = "rsc.io/quote/v3",
sum = "h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=",
version = "v3.1.0",
)
go_repository(
name = "io_rsc_sampler",
importpath = "rsc.io/sampler",
sum = "h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=",
version = "v1.3.0",
)
go_repository(
name = "org_apache_git_thrift_git",
importpath = "git.apache.org/thrift.git",
sum = "h1:OR8VhtwhcAI3U48/rzBsVOuHi0zDPzYI1xASVcdSgR8=",
version = "v0.0.0-20180902110319-2566ecd5d999",
)
go_repository(
name = "org_go4",
importpath = "go4.org",
sum = "h1:+hE86LblG4AyDgwMCLTE6FOlM9+qjHSYS+rKqxUVdsM=",
version = "v0.0.0-20180809161055-417644f6feb5",
)
go_repository(
name = "org_go4_grpc",
importpath = "grpc.go4.org",
sum = "h1:tmXTu+dfa+d9Evp8NpJdgOy6+rt8/x4yG7qPBrtNfLY=",
version = "v0.0.0-20170609214715-11d0a25b4919",
)
go_repository(
name = "org_go4_intern",
importpath = "go4.org/intern",
sum = "h1:VFTf+jjIgsldaz/Mr00VaCSswHJrI2hIjQygE/W4IMg=",
version = "v0.0.0-20210108033219-3eb7198706b2",
)
go_repository(
name = "org_go4_unsafe_assume_no_moving_gc",
importpath = "go4.org/unsafe/assume-no-moving-gc",
sum = "h1:1tk03FUNpulq2cuWpXZWj649rwJpk0d20rxWiopKRmc=",
version = "v0.0.0-20201222180813-1025295fd063",
)
go_repository(
name = "org_golang_google_api",
importpath = "google.golang.org/api",
sum = "h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI=",
version = "v0.56.0",
)
go_repository(
name = "org_golang_google_appengine",
importpath = "google.golang.org/appengine",
sum = "h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=",
version = "v1.6.7",
)
go_repository(
name = "org_golang_google_genproto",
importpath = "google.golang.org/genproto",
sum = "h1:z+ErRPu0+KS02Td3fOAgdX+lnPDh/VyaABEJPD4JRQs=",
version = "v0.0.0-20210828152312-66f60bf46e71",
)
go_repository(
name = "org_golang_google_grpc",
importpath = "google.golang.org/grpc",
sum = "h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=",
version = "v1.40.0",
)
go_repository(
name = "org_golang_google_grpc_cmd_protoc_gen_go_grpc",
importpath = "google.golang.org/grpc/cmd/protoc-gen-go-grpc",
sum = "h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE=",
version = "v1.1.0",
)
go_repository(
name = "org_golang_google_grpc_examples",
importpath = "google.golang.org/grpc/examples",
sum = "h1:mwWI8e38dmQeNvKtf/J7P9pdLzoXKnB0V5RBdMl0wNU=",
version = "v0.0.0-20210630181457-52546c5d89b7",
)
go_repository(
name = "org_golang_google_protobuf",
importpath = "google.golang.org/protobuf",
sum = "h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=",
version = "v1.27.1",
)
go_repository(
name = "org_golang_x_build",
importpath = "golang.org/x/build",
sum = "h1:E2M5QgjZ/Jg+ObCQAudsXxuTsLj7Nl5RV/lZcQZmKSo=",
version = "v0.0.0-20190111050920-041ab4dc3f9d",
)
go_repository(
name = "org_golang_x_crypto",
importpath = "golang.org/x/crypto",
sum = "h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=",
version = "v0.0.0-20210921155107-089bfa567519",
)
go_repository(
name = "org_golang_x_exp",
importpath = "golang.org/x/exp",
sum = "h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=",
version = "v0.0.0-20200224162631-6cc2880d07d6",
)
go_repository(
name = "org_golang_x_image",
importpath = "golang.org/x/image",
sum = "h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=",
version = "v0.0.0-20190802002840-cff245a6509b",
)
go_repository(
name = "org_golang_x_lint",
importpath = "golang.org/x/lint",
sum = "h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=",
version = "v0.0.0-20210508222113-6edffad5e616",
)
go_repository(
name = "org_golang_x_mobile",
importpath = "golang.org/x/mobile",
sum = "h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=",
version = "v0.0.0-20190719004257-d2bd2a29d028",
)
go_repository(
name = "org_golang_x_mod",
importpath = "golang.org/x/mod",
sum = "h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=",
version = "v0.4.2",
)
go_repository(
name = "org_golang_x_net",
importpath = "golang.org/x/net",
sum = "h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=",
version = "v0.0.0-20220127200216-cd36cc0744dd",
)
go_repository(
name = "org_golang_x_oauth2",
importpath = "golang.org/x/oauth2",
sum = "h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=",
version = "v0.0.0-20210819190943-2bc19b11175f",
)
go_repository(
name = "org_golang_x_perf",
importpath = "golang.org/x/perf",
sum = "h1:xYq6+9AtI+xP3M4r0N1hCkHrInHDBohhquRgx9Kk6gI=",
version = "v0.0.0-20180704124530-6e6d33e29852",
)
go_repository(
name = "org_golang_x_sync",
importpath = "golang.org/x/sync",
sum = "h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=",
version = "v0.0.0-20210220032951-036812b2e83c",
)
go_repository(
name = "org_golang_x_sys",
importpath = "golang.org/x/sys",
sum = "h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=",
version = "v0.0.0-20211216021012-1d35b9e2eb4e",
)
go_repository(
name = "org_golang_x_term",
importpath = "golang.org/x/term",
sum = "h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=",
version = "v0.0.0-20210927222741-03fcf44c2211",
)
go_repository(
name = "org_golang_x_text",
importpath = "golang.org/x/text",
sum = "h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=",
version = "v0.3.7",
)
go_repository(
name = "org_golang_x_time",
importpath = "golang.org/x/time",
sum = "h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=",
version = "v0.0.0-20210220033141-f8bda1e9f3ba",
)
go_repository(
name = "org_golang_x_tools",
importpath = "golang.org/x/tools",
sum = "h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=",
version = "v0.1.5",
)
go_repository(
name = "org_golang_x_xerrors",
importpath = "golang.org/x/xerrors",
sum = "h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=",
version = "v0.0.0-20200804184101-5ec99f83aff1",
)
go_repository(
name = "org_uber_go_atomic",
importpath = "go.uber.org/atomic",
sum = "h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=",
version = "v1.7.0",
)
go_repository(
name = "org_uber_go_goleak",
importpath = "go.uber.org/goleak",
sum = "h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=",
version = "v1.1.10",
)
go_repository(
name = "org_uber_go_multierr",
importpath = "go.uber.org/multierr",
sum = "h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=",
version = "v1.6.0",
)
go_repository(
name = "org_uber_go_zap",
importpath = "go.uber.org/zap",
sum = "h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=",
version = "v1.17.0",
)
go_repository(
name = "tools_gotest",
importpath = "gotest.tools",
sum = "h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=",
version = "v2.2.0+incompatible",
)
| 37.675506
| 74
| 0.639583
|
937dfe2b2c345bd1cbd3cfa466db456a4c9a915c
| 7,955
|
py
|
Python
|
webbpsf/tests/test_psfgrid.py
|
AldenJurling/webbpsf
|
9d368ab988a600047f5476020a38c682f59cca6a
|
[
"BSD-3-Clause"
] | 18
|
2015-07-16T18:44:26.000Z
|
2021-05-29T08:30:44.000Z
|
webbpsf/tests/test_psfgrid.py
|
AldenJurling/webbpsf
|
9d368ab988a600047f5476020a38c682f59cca6a
|
[
"BSD-3-Clause"
] | 178
|
2015-01-15T18:59:37.000Z
|
2019-12-09T14:54:31.000Z
|
webbpsf/tests/test_psfgrid.py
|
AldenJurling/webbpsf
|
9d368ab988a600047f5476020a38c682f59cca6a
|
[
"BSD-3-Clause"
] | 14
|
2015-10-06T15:09:00.000Z
|
2019-03-20T13:50:50.000Z
|
import os
import astropy.convolution
from astropy.io import fits
import numpy as np
import pytest
from .. import gridded_library
from .. import webbpsf_core
from .. import utils
def test_compare_to_calc_psf_oversampled():
"""
    Check that the output PSF matches calc_psf and is saved in the correct slice of the array,
    for a distorted, oversampled case.
    This case also uses an even-length array, so we need to subtract 0.5 from the detector
    position: the psf_grid value in the metadata has already been shifted during calc_psf to
    account for the even-length array, and that shift shouldn't be applied twice (i.e. again in
    the calc_psf call below).
"""
oversample = 2
fov_pixels = 10
# Create PSF grid
fgs = webbpsf_core.FGS()
fgs.detector = "FGS1"
grid = fgs.psf_grid(all_detectors=False, num_psfs=4, oversample=oversample, fov_pixels=fov_pixels)
# Pull one of the PSFs out of the grid
psfnum = 1
loc = grid.meta["grid_xypos"][psfnum]
locy = int(float(loc[1]) - 0.5)
locx = int(float(loc[0]) - 0.5)
gridpsf = grid.data[psfnum, :, :]
# Using meta data, create the expected same PSF via calc_psf
fgs.detector_position = (locx, locy)
calcpsf = fgs.calc_psf(oversample=oversample, fov_pixels=fov_pixels)["OVERDIST"].data
kernel = astropy.convolution.Box2DKernel(width=oversample)
convpsf = astropy.convolution.convolve(calcpsf, kernel)
# Compare to make sure they are in fact the same PSF
assert gridpsf.shape == calcpsf.shape
assert np.array_equal(gridpsf, convpsf)
def test_compare_to_calc_psf_detsampled():
"""
    Check that the output PSF matches calc_psf and is saved in the correct slice of the array,
    for an undistorted, detector-sampled case.
"""
oversample = 2
fov_arcsec = 0.5
# Create PSF grid
mir = webbpsf_core.MIRI()
mir.filter = "F560W"
mir.detector = "MIRIM"
grid = mir.psf_grid(all_detectors=False, num_psfs=4, use_detsampled_psf=True, add_distortion=False,
oversample=oversample, fov_arcsec=fov_arcsec)
# Pull one of the PSFs out of the grid
psfnum = 1
loc = grid.meta["grid_xypos"][psfnum]
locy = int(float(loc[1]))
locx = int(float(loc[0]))
gridpsf = grid.data[psfnum, :, :]
# Using meta data, create the expected same PSF via calc_psf
mir.detector_position = (locx, locy)
mir.options['output_mode'] = 'Detector Sampled Image'
calcpsf = mir.calc_psf(oversample=oversample, fov_arcsec=fov_arcsec)["DET_SAMP"].data
kernel = astropy.convolution.Box2DKernel(width=1)
convpsf = astropy.convolution.convolve(calcpsf, kernel)
# Compare to make sure they are in fact the same PSF
assert gridpsf.shape == calcpsf.shape
assert np.array_equal(gridpsf, convpsf)
def test_all():
"""
    Check that running all the detectors works (i.e. setting all_detectors=True). In
    particular for NIRCam, test that the detectors pulled are correct
    (shortwave vs longwave) with respect to the filter.
"""
nir = webbpsf_core.NIRCam()
longfilt = "F250M"
shortfilt = "F140M"
# Case 1: Shortwave -> check that only the SW detectors are applied for the SW filter
nir.filter = shortfilt
grid1 = nir.psf_grid(all_detectors=True, num_psfs=1, add_distortion=False, fov_pixels=1, oversample=2)
det_list = []
for hdu in grid1:
det_list.append(hdu.meta["detector"][0])
assert len(grid1) == len(gridded_library.CreatePSFLibrary.nrca_short_detectors)
assert set(det_list) == set(gridded_library.CreatePSFLibrary.nrca_short_detectors)
# Case 2: Longwave -> check that only the LW detectors are applied for the LW filter
nir.filter = longfilt
grid2 = nir.psf_grid(all_detectors=True, num_psfs=1, add_distortion=False, fov_pixels=1, oversample=2)
det_list = []
for hdu in grid2:
det_list.append(hdu.meta["detector"][0])
assert len(grid2) == len(gridded_library.CreatePSFLibrary.nrca_long_detectors)
assert set(det_list) == set(gridded_library.CreatePSFLibrary.nrca_long_detectors)
def test_one_psf():
"""Check that setting num_psfs = 1 produces the PSF in the right location"""
oversample = 2
fov_pixels = 11
nis = webbpsf_core.NIRISS()
nis.filter = "F140M"
# Case 1: The PSF is centered on the detector (with single_psf_centered=True)
grid1 = nis.psf_grid(all_detectors=False, num_psfs=1, add_distortion=True, oversample=oversample,
fov_pixels=fov_pixels, single_psf_centered=True, use_detsampled_psf=False)
# Case 2: The PSF is set to a specific position (with nis.detector_position = (10, 0))
nis.detector_position = (10, 0) # it's set as (x,y)
grid2 = nis.psf_grid(all_detectors=False, num_psfs=1, add_distortion=True, oversample=oversample,
fov_pixels=fov_pixels, single_psf_centered=False, use_detsampled_psf=False)
# Compare Case 2 to the calc_psf output to make sure it's placing the PSF in the right location
calc = nis.calc_psf(add_distortion=True, oversample=2, fov_pixels=11)
kernel = astropy.convolution.Box2DKernel(width=oversample)
convpsf = astropy.convolution.convolve(calc["OVERDIST"].data, kernel)
assert grid1.meta["grid_xypos"] == [(1023, 1023)] # the default is the center of the NIS aperture
assert grid2.meta["grid_xypos"] == [(10, 0)] # it's in (x,y)
assert np.array_equal(convpsf, grid2.data[0, :, :])
def test_nircam_errors():
"""Check that there are checks for incorrect value setting - particularly with NIRCam"""
longfilt = "F250M"
shortfilt = "F140M"
longdet = "NRCB5"
shortdet = "NRCA3"
nir = webbpsf_core.NIRCam()
# Shouldn't error - applying SW to SW and LW to LW
nir.filter = longfilt
nir.detector = longdet
nir.psf_grid(all_detectors=False, num_psfs=1, fov_pixels=1, detector_oversample=2, fft_oversample=2)
nir.filter = shortfilt
nir.detector = shortdet
nir.psf_grid(all_detectors=False, num_psfs=1, fov_pixels=1, detector_oversample=2, fft_oversample=2)
# Should error - Bad filter/detector combination (LW filt to SW det)
with pytest.raises(RuntimeError) as excinfo: # Errors inside calc_psf() call
nir.filter = longfilt
nir.detector = shortdet
nir.psf_grid(all_detectors=False, num_psfs=1, fov_pixels=1) # error
assert "RuntimeError" in str(excinfo)
# Should error - Bad filter/detector combination (SW filt to LW det)
with pytest.raises(RuntimeError) as excinfo: # Errors inside calc_psf() call
nir.filter = shortfilt
nir.detector = longdet
nir.psf_grid(all_detectors=False, num_psfs=1, fov_pixels=1) # error
assert "RuntimeError" in str(excinfo)
# Should error - Bad num_psfs entry (must be a square number)
with pytest.raises(ValueError) as excinfo:
nir.psf_grid(all_detectors=False, num_psfs=2, fov_pixels=1) # error
assert "ValueError" in str(excinfo)
def test_saving(tmpdir):
"""Test saving files works properly"""
# Create a temp directory to place file in
file = str(tmpdir.join("test1"))
# Test using default calc_psf values
fgs = webbpsf_core.FGS()
fgs.filter = "FGS"
fgs.detector = "FGS2"
grid = fgs.psf_grid(all_detectors=False, num_psfs=4, save=True, outfile=file, overwrite=True)
# Check that the saved file matches the returned file (and thus that the save worked through properly)
with fits.open(os.path.join(file[:-5], "test1_fgs2_fgs.fits")) as infile:
# Check data
assert np.array_equal(infile[0].data, grid.data)
# Check meta data
model = utils.to_griddedpsfmodel(infile)
assert model.meta.keys() == grid.meta.keys()
assert model.meta["grid_xypos"] == grid.meta["grid_xypos"]
assert model.meta["oversampling"] == grid.meta["oversampling"]
# Remove temporary directory
tmpdir.remove()
| 38.995098
| 106
| 0.69956
|
a406d93cf2e32116c3941f9abb5b90bb3ff01b92
| 806
|
py
|
Python
|
func.py
|
cclauss/Pythonista-sm
|
ef5c6527f36334a2b4dc3f0a92f957161aa3bdd3
|
[
"Apache-2.0"
] | 3
|
2021-08-23T02:49:09.000Z
|
2021-08-24T01:48:14.000Z
|
func.py
|
cclauss/Pythonista-sm
|
ef5c6527f36334a2b4dc3f0a92f957161aa3bdd3
|
[
"Apache-2.0"
] | null | null | null |
func.py
|
cclauss/Pythonista-sm
|
ef5c6527f36334a2b4dc3f0a92f957161aa3bdd3
|
[
"Apache-2.0"
] | 1
|
2021-08-23T03:02:39.000Z
|
2021-08-23T03:02:39.000Z
|
# -*-coding:utf-8-*-
from random import choice
def xor(a, b):
return list(map(lambda x, y: x ^ y, a, b))
def rotl(x, n):
return ((x << n) & 0xffffffff) | ((x >> (32 - n)) & 0xffffffff)
def get_uint32_be(key_data):
return (key_data[0] << 24) | (key_data[1] << 16) | (key_data[2] << 8) | (
key_data[3])
def put_uint32_be(n):
return [((n >> 24) & 0xff), ((n >> 16) & 0xff), ((n >> 8) & 0xff), (n & 0xff)]
def padding(data, block=16):
    # PKCS#7-style padding: append pad_len bytes, each equal to pad_len,
    # so the result is a multiple of the block size.
    pad_len = block - len(data) % block
    return data + [pad_len for _ in range(pad_len)]
def un_padding(data):
return data[:-data[-1]]
def list_to_bytes(data):
return b''.join([bytes((i, )) for i in data])
def bytes_to_list(data):
return [i for i in data]
def random_hex(x):
return ''.join([choice('0123456789abcdef') for _ in range(x)])
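A minimal usage sketch for the helpers above (illustrative values, not part of the original file): it round-trips a short message through the bytes/list conversions and the block padding, and exercises the big-endian word helpers.
msg = bytes_to_list(b"attack at dawn")
padded = padding(msg)                              # pad to a multiple of 16 bytes
assert len(padded) % 16 == 0
assert un_padding(padded) == msg                   # padding round-trips
assert list_to_bytes(msg) == b"attack at dawn"
word = get_uint32_be(padded[:4])                   # first 4 bytes as a big-endian uint32
assert put_uint32_be(word) == padded[:4]
assert rotl(word, 8) == rotl(rotl(word, 4), 4)     # 32-bit rotations compose
key = random_hex(32)                               # 32 hex characters = 128 random bits
assert len(key) == 32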
| 19.190476
| 79
| 0.581886
|
d1009416e76a7f24f5482519f2c9b6a0331099b6
| 551
|
py
|
Python
|
test/code/for/for3.py
|
BBN-Q/pyqgl2
|
7acc8b244ee7799c21df03ecff8325e15cdb94d3
|
[
"Apache-2.0"
] | 10
|
2017-09-30T14:31:42.000Z
|
2021-12-12T07:52:05.000Z
|
test/code/for/for3.py
|
BBN-Q/pyqgl2
|
7acc8b244ee7799c21df03ecff8325e15cdb94d3
|
[
"Apache-2.0"
] | 56
|
2017-04-11T15:47:15.000Z
|
2019-10-11T15:50:35.000Z
|
test/code/for/for3.py
|
BBN-Q/pyqgl2
|
7acc8b244ee7799c21df03ecff8325e15cdb94d3
|
[
"Apache-2.0"
] | 3
|
2019-06-20T07:08:39.000Z
|
2020-10-24T19:29:05.000Z
|
from qgl2.qgl2 import concur, qreg, qgl2decl, qgl2main
from qgl2.qgl2 import Qbit
@qgl2decl
def func_a1(a: qreg, b: qreg, c: qreg):
with concur:
for q in [a, b, c]:
X90(a)
func_b(q)
@qgl2decl
def func_a2(a: qreg, b: qreg, c: qreg):
for q in [a, b, c]:
X90(a)
func_b(q)
@qgl2decl
def func_b(a: qreg):
for x in [1, 2, 3]:
X180(a, x)
for x in [4, 5, 6]:
Y90(a, x)
@qgl2main
def main():
x = Qbit(1)
y = Qbit(2)
z = Qbit(3)
func_a1(x, y, z)
func_a2(x, y, z)
| 15.742857
| 44
| 0.504537
|
ad819f0433d0d28b2c26f36d2d21cc5b6218d495
| 4,770
|
py
|
Python
|
sif/samplers/elliptical_slice_sampler.py
|
JamesBrofos/Sif
|
a38ddb14f598f9f35d3ed9e872260b938e961433
|
[
"MIT"
] | 1
|
2022-03-24T18:59:27.000Z
|
2022-03-24T18:59:27.000Z
|
sif/samplers/elliptical_slice_sampler.py
|
JamesBrofos/Sif
|
a38ddb14f598f9f35d3ed9e872260b938e961433
|
[
"MIT"
] | 4
|
2020-03-24T15:39:08.000Z
|
2021-02-02T21:44:05.000Z
|
sif/samplers/elliptical_slice_sampler.py
|
JamesBrofos/Sif
|
a38ddb14f598f9f35d3ed9e872260b938e961433
|
[
"MIT"
] | 1
|
2019-11-05T19:28:06.000Z
|
2019-11-05T19:28:06.000Z
|
import numpy as np
from numpy.random import multivariate_normal, uniform
class EllipticalSliceSampler:
"""Elliptical Slice Sampler Class
    The elliptical slice sampling algorithm is a Markov chain Monte Carlo
    approach to sampling from posterior distributions that consist of an
    arbitrary likelihood times a multivariate normal prior. The algorithm is
    advantageous because it is conceptually simple, easy to implement, and has
    no free parameters.
The algorithm operates by randomly selecting a candidate from an ellipse
defined by two vectors, one of which is assumed to be drawn from the target
posterior and another that is an auxiliary random sample of a zero-mean
multivariate normal. The algorithm iteratively shrinks the range from which
candidates can be drawn until a candidate proposal is accepted.
"""
def __init__(self, mean, covariance, log_likelihood_func):
"""Initialize the parameters of the elliptical slice sampler object.
Parameters:
mean (numpy array): A mean vector of a multivariate Gaussian.
covariance (numpy array): A two-dimensional positive-definite
covariance matrix of a multivariate Gaussian.
log_likelihood_func (function): A log-likelihood function that maps
a given sample (as its exclusive input) to a real number
reflecting the log-likelihood of the observational data under
the input parameters.
"""
self.mean = mean
self.covariance = covariance
self.log_likelihood_func = log_likelihood_func
def __sample(self, f):
"""Internal function that draws an individual sample according to the
elliptical slice sampling routine. The input is drawn from the target
distribution and the output is as well.
Parameters:
f (numpy array): A vector representing a parameter state that has
been sampled from the target posterior distribution. Note that
a sufficiently high `burnin` parameter can be leveraged to
achieve good mixing for this purpose.
"""
# Choose the ellipse for this sampling iteration.
nu = multivariate_normal(np.zeros(self.mean.shape), self.covariance)
# Set the candidate acceptance threshold.
log_y = self.log_likelihood_func(f) + np.log(uniform())
# Set the bracket for selecting candidates on the ellipse.
theta = np.random.uniform(0., 2.*np.pi)
theta_min, theta_max = theta - 2.*np.pi, theta
        # Iterate until a candidate is accepted.
while True:
            # Generate a point on the ellipse defined by `nu` and the input. We
# also compute the log-likelihood of the candidate and compare to
# our threshold.
fp = (f - self.mean)*np.cos(theta) + nu*np.sin(theta) + self.mean
log_fp = self.log_likelihood_func(fp)
if log_fp > log_y:
return fp
else:
# If the candidate is not selected, shrink the bracket and
# generate a new `theta`, which will yield a new candidate
# point on the ellipse.
if theta < 0.:
theta_min = theta
else:
theta_max = theta
theta = uniform(theta_min, theta_max)
def sample(self, n_samples, burnin=1000):
"""This function is user-facing and is used to generate a specified
number of samples from the target distribution using elliptical slice
sampling. The `burnin` parameter defines how many iterations should be
performed (and excluded) to achieve convergence to the target
distribution.
Parameters:
n_samples (int): The number of samples to produce from this sampling
routine.
burnin (int, optional): The number of burnin iterations to perform.
This is necessary to achieve samples that are representative of
the true posterior and correctly characterize uncertainty.
"""
# Compute the total number of samples.
total_samples = n_samples + burnin
# Initialize a matrix to store the samples. The first sample is chosen
# to be a draw from the multivariate normal prior.
samples = np.zeros((total_samples, self.covariance.shape[0]))
samples[0] = multivariate_normal(mean=self.mean, cov=self.covariance)
for i in range(1, total_samples):
samples[i] = self.__sample(samples[i-1])
# Throw out the burnin iterations.
return samples[burnin:]
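A minimal usage sketch (hypothetical, not part of the original module): with a standard normal prior in two dimensions and an isotropic Gaussian log-likelihood centred at (1, 1), the posterior is Gaussian with mean (0.5, 0.5), so the sample mean should land near that point; the dimensions, target and sample counts here are illustrative choices only.
if __name__ == "__main__":
    prior_mean = np.zeros(2)
    prior_cov = np.eye(2)
    obs = np.ones(2)
    def log_likelihood(f):
        # Isotropic Gaussian log-likelihood centred at `obs` (up to an additive constant).
        return -0.5 * np.sum((f - obs) ** 2)
    sampler = EllipticalSliceSampler(prior_mean, prior_cov, log_likelihood)
    samples = sampler.sample(n_samples=2000, burnin=500)
    print(samples.mean(axis=0))  # expected to be roughly [0.5, 0.5]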
| 48.673469
| 80
| 0.656184
|
3726a56e530db949e18d19c23650c2724564d748
| 1,303
|
py
|
Python
|
syloga/utils/predicates.py
|
xaedes/python-symbolic-logic-to-gate
|
a0dc9be9e04290008cf709fac789d224ab8c14b0
|
[
"MIT"
] | null | null | null |
syloga/utils/predicates.py
|
xaedes/python-symbolic-logic-to-gate
|
a0dc9be9e04290008cf709fac789d224ab8c14b0
|
[
"MIT"
] | null | null | null |
syloga/utils/predicates.py
|
xaedes/python-symbolic-logic-to-gate
|
a0dc9be9e04290008cf709fac789d224ab8c14b0
|
[
"MIT"
] | null | null | null |
import collections.abc
def is_hashable(x):
"""
Determines whether x is hashable.
:returns: True if the specified x is hashable, False otherwise.
:rtype: bool
"""
return hasattr(x, "__hash__") and callable(x.__hash__)
def is_mappable_collection(expression):
    """
    Return True if expression is a sequence or dict that should be mapped over,
    and False for scalar-like types (str, bytes, bytearray) and everything else.
    """
    non_collection_types = (str, bytes, bytearray)
    collection_types = (collections.abc.Sequence, dict)
    if isinstance(expression, non_collection_types): return False
    if isinstance(expression, collection_types): return True
    return False
def decide_predicate_usage(predicate, default_predicate=False, list_combinator = any):
p = predicate
dp = default_predicate
decide_list = lambda p, dp, x: list_combinator((decide_predicate_usage(p_,dp)(x) for p_ in p))
is_container = lambda f: isinstance(f, collections.abc.Container)
return lambda x: (
p if type(p) == bool else
isinstance(x, p) if type(p) == type else
p(x) if callable(p) else
str(x).startswith(p) if type(p) == str else
decide_list(p,dp,x) if type(p) == list else
(x in p) if is_container(p) else
decide_predicate_usage(dp,dp)(x) if p != dp else
dp
)
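A short usage sketch (illustrative values only) showing the predicate forms that decide_predicate_usage dispatches on: a bare type, a callable, a string prefix, a list combined with any(), and a generic container checked by membership.
is_int = decide_predicate_usage(int)                   # type -> isinstance check
is_positive = decide_predicate_usage(lambda x: x > 0)  # callable -> applied directly
starts_ab = decide_predicate_usage("ab")               # str -> str(x).startswith(...)
any_of = decide_predicate_usage([int, "ab"])           # list -> any() over sub-predicates
in_small = decide_predicate_usage({1, 2, 3})           # other container -> membership test
assert is_int(5) and not is_int("5")
assert is_positive(2) and not is_positive(-1)
assert starts_ab("abc") and not starts_ab("xyz")
assert any_of(7) and any_of("abba") and not any_of(2.5)
assert in_small(2) and not in_small(9)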
| 37.228571
| 98
| 0.6132
|
429905f0f2103ce1286258397621e2880e677679
| 4,084
|
py
|
Python
|
application/workprogramsapp/expertise/serializers.py
|
18ariana/analytics_backend
|
bfcda70564dd14dadb72de6a70fe2d66790eae85
|
[
"MIT"
] | null | null | null |
application/workprogramsapp/expertise/serializers.py
|
18ariana/analytics_backend
|
bfcda70564dd14dadb72de6a70fe2d66790eae85
|
[
"MIT"
] | null | null | null |
application/workprogramsapp/expertise/serializers.py
|
18ariana/analytics_backend
|
bfcda70564dd14dadb72de6a70fe2d66790eae85
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from dataprocessing.serializers import userProfileSerializer
# from workprogramsapp.educational_program.serializers import EducationalProgramSerializer
from workprogramsapp.expertise.models import UserExpertise, Expertise, ExpertiseComments
from workprogramsapp.serializers import WorkProgramShortForExperiseSerializer
class UserExpertiseSerializer(serializers.ModelSerializer):
class Meta:
model = UserExpertise
fields = "__all__"
def to_representation(self, value):
self.fields['expert'] = userProfileSerializer(many=False)
self.fields['expertise'] = ExpertiseSerializer(many=False, read_only=True)
return super().to_representation(value)
class UserExpertiseForExpertiseSerializer(serializers.ModelSerializer):
class Meta:
model = UserExpertise
fields = ['id','expert', 'stuff_status', 'user_expertise_status', 'expert_result']
def to_representation(self, value):
self.fields['expert'] = userProfileSerializer(many=False)
return super().to_representation(value)
class ExpertiseSerializer(serializers.ModelSerializer):
"""
    Automatically adds the user who created the expertise as its leader
"""
user_status_in_expertise = serializers.SerializerMethodField()
def get_user_status_in_expertise(self, instance):
request = self.context.get("request")
user_statuses = \
{
"expertise_master": False,
"expertise_member": bool(UserExpertise.objects.filter(
expert=request.user, expertise_id=instance.id,
stuff_status="EX")),
"structural_leader": bool(Expertise.objects.filter(
pk=instance.id,
work_program__structural_unit__user_in_structural_unit__user=request.user,
work_program__structural_unit__user_in_structural_unit__status__in=["leader", "deputy"]).distinct())
}
for group in request.user.groups.all():
if group.name == "expertise_master":
user_statuses["expertise_master"] = True
return user_statuses
def create(self, validated_data):
is_exp_exist = Expertise.objects.filter(work_program=validated_data['work_program'])
if is_exp_exist:
print("такая экспертиза уже существует")
return is_exp_exist[0]
exp = Expertise.objects.create(**validated_data)
request = self.context.get('request')
UserExpertise.objects.create(expertise=exp, expert=request.user, stuff_status="AU") # ???
return exp
def to_representation(self, value):
self.fields['work_program'] = WorkProgramShortForExperiseSerializer(many=False, read_only=True)
self.fields['experts'] = userProfileSerializer(many=True, read_only=True)
self.fields['expertse_users_in_rpd'] = UserExpertiseForExpertiseSerializer(many=True, read_only=True)
return super().to_representation(value)
class Meta:
model = Expertise
fields = "__all__"
# class ExpertiseWithUsersStatusSerializer(serializers.ModelSerializer):
# """
#     Automatically adds the user who created the expertise as its leader
# """
# work_program = WorkProgramShortForExperiseSerializer(many=False, read_only=True)
# expertse_users_in_rpd = UserExpertiseForExpertiseSerializer(many=True, read_only=True)
#
#
# class Meta:
# model = Expertise
#     fields = ['work_program', 'expertse_users_in_rpd']
class CommentSerializer(serializers.ModelSerializer):
def to_representation(self, value):
self.fields['user_expertise'] = OnlyUserExpertiseSerializer(many=False, read_only=True)
return super().to_representation(value)
class Meta:
model = ExpertiseComments
fields = "__all__"
class OnlyUserExpertiseSerializer(serializers.ModelSerializer):
expert = userProfileSerializer(many=False)
class Meta:
model = UserExpertise
fields = ['expert']
| 38.168224
| 120
| 0.703967
|
0e014161fbaa35d955c951cc01f317c5e803ac51
| 1,492
|
py
|
Python
|
docs/conf.py
|
ionelmc/python-lazy-object-proxy
|
0581e276c6fd4c72c5b46cc45bdb3d341c16cfa9
|
[
"BSD-2-Clause"
] | 209
|
2015-01-05T22:05:19.000Z
|
2022-03-04T06:59:07.000Z
|
docs/conf.py
|
ionelmc/python-lazy-object-proxy
|
0581e276c6fd4c72c5b46cc45bdb3d341c16cfa9
|
[
"BSD-2-Clause"
] | 58
|
2015-03-10T06:28:03.000Z
|
2022-03-11T06:21:13.000Z
|
docs/conf.py
|
ionelmc/python-lazy-object-proxy
|
0581e276c6fd4c72c5b46cc45bdb3d341c16cfa9
|
[
"BSD-2-Clause"
] | 38
|
2015-01-05T13:28:44.000Z
|
2022-02-14T18:57:21.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import traceback
import sphinx_py3doc_enhanced_theme
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
source_suffix = '.rst'
master_doc = 'index'
project = 'lazy-object-proxy'
year = '2014-2021'
author = 'Ionel Cristian Mărieș'
copyright = '{0}, {1}'.format(year, author)
try:
from pkg_resources import get_distribution
version = release = get_distribution('lazy_object_proxy').version
except Exception:
traceback.print_exc()
version = release = '1.6.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/ionelmc/python-lazy-object-proxy/issues/%s', '#'),
'pr': ('https://github.com/ionelmc/python-lazy-object-proxy/pull/%s', 'PR #'),
}
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
html_theme_options = {
'githuburl': 'https://github.com/ionelmc/python-lazy-object-proxy/'
}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| 27.127273
| 84
| 0.705764
|
41165c1e2720fe747fd40f7e94b8c1c70db67240
| 37
|
py
|
Python
|
processors/postprocessors/__init__.py
|
Zvezdin/blockchain-predictor
|
df6f939037471dd50b7b9c96673d89b04b646ef2
|
[
"MIT"
] | 35
|
2017-10-25T17:10:35.000Z
|
2022-03-20T18:12:06.000Z
|
processors/postprocessors/__init__.py
|
Zvezdin/blockchain-predictor
|
df6f939037471dd50b7b9c96673d89b04b646ef2
|
[
"MIT"
] | 2
|
2017-09-20T17:39:15.000Z
|
2018-04-01T17:20:29.000Z
|
processors/postprocessors/__init__.py
|
Zvezdin/blockchain-predictor
|
df6f939037471dd50b7b9c96673d89b04b646ef2
|
[
"MIT"
] | 10
|
2017-12-01T13:47:04.000Z
|
2021-12-16T06:53:17.000Z
|
from .postprocessors_imports import *
| 37
| 37
| 0.864865
|
00c89d09d384ff3306ba9948eee9edc5197a70eb
| 4,605
|
py
|
Python
|
TermProject/crawler/crawler_thread.py
|
MadCreeper/SJTU_ICE2602
|
bb2b41e4a056ec1017f3a531c30c67172753289d
|
[
"MIT"
] | null | null | null |
TermProject/crawler/crawler_thread.py
|
MadCreeper/SJTU_ICE2602
|
bb2b41e4a056ec1017f3a531c30c67172753289d
|
[
"MIT"
] | null | null | null |
TermProject/crawler/crawler_thread.py
|
MadCreeper/SJTU_ICE2602
|
bb2b41e4a056ec1017f3a531c30c67172753289d
|
[
"MIT"
] | null | null | null |
# SJTU EE208
# -*-coding:utf-8-*-
import os
import math
import re
import string
import sys
from typing import final
import urllib
import urllib.error
import urllib.parse
import urllib.request
from urllib.request import Request, urlopen
import hashlib
import threading
import queue
import time
from bs4 import BeautifulSoup
import argparse
import bloomFilter # self-implemented BloomFilter class
TIMEOUTSECONDS = 3 # request timeout in seconds
MAXFILENAMELENGTH = 50 # maximum allowed filename length
SELF_URL_MARKER = "SELF_URL_TAG:" # when a crawled page is written to file, append its url at the end of the html file for easy lookup
header = {'User-Agent': 'user-agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36 Edg/80.0.361.54'}
def valid_filename(s):
valid_chars = "-_(). %s%s" % (string.ascii_letters, string.digits)
    s = ''.join((c if c != '.' else '+') for c in s if c in valid_chars) # replace '.' in the filename
    return s[:MAXFILENAMELENGTH] + '.html' # guard against overly long filenames
def get_page(page,coding = 'utf-8'):
global successful
global failed
try:
request = Request(page, headers=header)
content = urlopen(request,timeout = TIMEOUTSECONDS).read()
except:
raise ValueError
else:
return content.decode(coding)
def get_all_links(content, page): # html content, page url
links = []
soup = BeautifulSoup(content, features="html.parser")
for href_tag in soup.findAll("a",{"href" : re.compile("^http|^/|")}):
url = href_tag.get("href","")
if url[:4] != "http":
url = urllib.parse.urljoin(page,url)
links.append(url)
return links
def add_page_to_folder(page, content):
index_filename = 'index.txt'
folder = 'html_gog'
filename = valid_filename(page)
#print(filename)
index = open(index_filename, 'a')
index.write(str(page) + '\t' + str(filename) + '\n')
index.close()
#print(page_link)
if not os.path.exists(folder):
os.mkdir(folder)
    f = open(os.path.join(folder, filename), 'w',encoding='utf-8') # on Windows be sure to set encoding='utf-8', otherwise the system tries to write gbk and raises an error
    f.write("<!-- " + SELF_URL_MARKER + page + " -->" + "\n")
f.write(str(content))
f.close()
def match_required_url(url,restriction_url):
return (restriction_url == '*' or restriction_url in url)
count = 0
MAXCOUNT = 50
successful = 0
failed = 0
crawl_only = "https://www.gog.com/game/"
def crawl():
global successful
global failed
global crawl_only
while True:
page = q.get(block = True,timeout = TIMEOUTSECONDS)
if not crawled.find(page):
#print("current page:",page)
try:
print("getting:",page)
content = get_page(page)
except:
print(page,"not found or cannot open!")
failed += 1
                q.task_done() # queue.task_done() must also be called when fetching the url fails
continue
else:
successful += 1
add_page_to_folder(page, content)
outlinks = get_all_links(content, page)
#print(outlinks)
global count
for link in outlinks:
if (not crawled.find(link)) and count < MAXCOUNT and match_required_url(link,crawl_only):
q.put(link)
count += 1
if varLock.acquire():
graph[page] = outlinks
crawled.add(page)
varLock.release()
print("Tasks left:",q.qsize())
q.task_done()
if __name__ == '__main__':
# seed = str(sys.argv[1])
#method = sys.argv[2]
#max_page = int(sys.argv[3])
parser = argparse.ArgumentParser()
parser.add_argument("-s")
parser.add_argument("-thread")
parser.add_argument("-page")
args = parser.parse_args()
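    # Example invocation (illustrative values only; any seed matching `crawl_only` works):
    #   python crawler_thread.py -s https://www.gog.com/game/some_game -thread 8 -page 50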
    seed = args.s # starting page url
    THREAD_NUM = int(args.thread) # number of threads
    MAXCOUNT = int(args.page) # target number of pages
    start_time = time.time() # timer
varLock = threading.Lock()
q = queue.Queue()
bitset_len = 20 * MAXCOUNT
crawled = bloomFilter.BloomFilter(bitset_len, bloomFilter.get_optimal_k(bitset_len,MAXCOUNT))
graph = {}
q.put(seed)
for i in range(THREAD_NUM):
t = threading.Thread(target=crawl)
t.daemon = True
t.start()
print("Start Working!")
    q.join() # wait until the queue is empty
end_time = time.time()
print(f"total time used:{(end_time - start_time)}")
print(f"successful: {successful}, failed: {failed}")
| 27.410714
| 169
| 0.593485
|
95dfa3a3620375551be1123939ae07b02b93773f
| 446
|
py
|
Python
|
examples/meetapp/modules/meetings/domain/time.py
|
pcah/python-clean-architecture
|
20630d0b3b4c00f6503a26cc98c45df12bc31b3b
|
[
"MIT"
] | 278
|
2019-01-10T07:57:29.000Z
|
2022-03-31T22:47:13.000Z
|
examples/meetapp/modules/meetings/domain/time.py
|
asuzukosi/python-clean-architecture
|
20630d0b3b4c00f6503a26cc98c45df12bc31b3b
|
[
"MIT"
] | 80
|
2018-11-17T23:44:39.000Z
|
2021-12-15T18:29:04.000Z
|
examples/meetapp/modules/meetings/domain/time.py
|
lhaze/dharma
|
20630d0b3b4c00f6503a26cc98c45df12bc31b3b
|
[
"MIT"
] | 29
|
2018-11-19T20:11:13.000Z
|
2022-03-02T06:27:34.000Z
|
import datetime
import typing as t
from meetapp.modules.shared_domain.clock import IClockService
from pca.domain import ValueObject
from pca.utils.dependency_injection import (
Inject,
inject,
)
class MeetingTerm(ValueObject):
    start_date: t.Optional[datetime.datetime] = None
    end_date: t.Optional[datetime.datetime] = None
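    # `clock` below is supplied by the dependency-injection container via Inject();
    # callers normally do not pass it explicitly.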
@inject
def is_after_start(self, clock: IClockService = Inject()):
return clock.now() > self.start_date
| 22.3
| 62
| 0.73991
|
59ef1f04e894d72c0ecf0712cb5c700a7e2d9806
| 4,669
|
py
|
Python
|
hazel/util.py
|
aasensio/hazel2
|
d9b551915f5d2bb399e03b054dffe4ca42fedeb5
|
[
"MIT"
] | 17
|
2018-08-31T11:13:59.000Z
|
2022-01-12T02:30:56.000Z
|
hazel/util.py
|
aasensio/hazel2
|
d9b551915f5d2bb399e03b054dffe4ca42fedeb5
|
[
"MIT"
] | 26
|
2018-04-03T15:09:21.000Z
|
2021-05-27T10:10:45.000Z
|
hazel/util.py
|
aasensio/hazel2
|
d9b551915f5d2bb399e03b054dffe4ca42fedeb5
|
[
"MIT"
] | 3
|
2018-05-01T13:47:21.000Z
|
2019-09-23T20:49:08.000Z
|
import numpy as np
import h5py
from asciitree import LeftAligned
from collections import OrderedDict
from asciitree.drawing import BoxStyle, BOX_DOUBLE, BOX_BLANK
__all__ = ['i0_allen', '_extract_parameter_cycles', 'isint', 'fvoigt', 'lower_dict_keys', 'show_tree']
def i0_allen(wavelength, muAngle):
"""
Return the solar intensity at a specific wavelength and heliocentric angle
wavelength: wavelength in angstrom
muAngle: cosine of the heliocentric angle
"""
C = 2.99792458e10
H = 6.62606876e-27
if (muAngle == 0):
return 0.0
lambdaIC = 1e4 * np.asarray([0.20,0.22,0.245,0.265,0.28,0.30,0.32,0.35,0.37,0.38,0.40,0.45,0.50,0.55,0.60,0.80,1.0,1.5,2.0,3.0,5.0,10.0])
uData = np.asarray([0.12,-1.3,-0.1,-0.1,0.38,0.74,0.88,0.98,1.03,0.92,0.91,0.99,0.97,0.93,0.88,0.73,0.64,0.57,0.48,0.35,0.22,0.15])
vData = np.asarray([0.33,1.6,0.85,0.90,0.57, 0.20, 0.03,-0.1,-0.16,-0.05,-0.05,-0.17,-0.22,-0.23,-0.23,-0.22,-0.20,-0.21,-0.18,-0.12,-0.07,-0.07])
lambdaI0 = 1e4 * np.asarray([0.20,0.22,0.24,0.26,0.28,0.30,0.32,0.34,0.36,0.37,0.38,0.39,0.40,0.41,0.42,0.43,0.44,0.45,0.46,0.48,0.50,0.55,0.60,0.65,0.70,0.75,\
0.80,0.90,1.00,1.10,1.20,1.40,1.60,1.80,2.00,2.50,3.00,4.00,5.00,6.00,8.00,10.0,12.0])
I0 = np.asarray([0.06,0.21,0.29,0.60,1.30,2.45,3.25,3.77,4.13,4.23,4.63,4.95,5.15,5.26,5.28,5.24,5.19,5.10,5.00,4.79,4.55,4.02,3.52,3.06,2.69,2.28,2.03,\
1.57,1.26,1.01,0.81,0.53,0.36,0.238,0.160,0.078,0.041,0.0142,0.0062,0.0032,0.00095,0.00035,0.00018])
I0 *= 1e14 * (lambdaI0 * 1e-8)**2 / C
u = np.interp(wavelength, lambdaIC, uData)
v = np.interp(wavelength, lambdaIC, vData)
i0 = np.interp(wavelength, lambdaI0, I0)
return (1.0 - u - v + u * muAngle + v * muAngle**2)* i0
def _extract_parameter_cycles(s):
tmp = s[0].split('->')
value = float(tmp[0])
cycle1 = tmp[1].strip()
cycles = [cycle1] + s[1:]
return value, cycles
def isint(str):
try:
int(str)
return True
except ValueError:
return False
def isfloat(str):
if (str is None):
return False
try:
float(str)
return True
except ValueError:
return False
def toint(l):
return [int(x) if isint(x) else x for x in l]
def tofloat(l):
return [float(x) if isfloat(x) else None for x in l]
def tobool(l):
return True if l == 'True' else False
def onlyint(l):
return [i for i in l if isinstance(i, int)]
def fvoigt(damp,v):
"""
Fast implementation of the Voigt-Faraday function
Parameters
----------
damp : float
damping parameter
v : float
normalized wavelength (lambda-lambda0) / sigma
Returns
-------
voigt, faraday : float
Value of the Voigt and Faraday functions
Notes
-----
A rational approximation to the complex error function is used
after Hui, Armstrong, and Wray(1978, JQSRT 19, 509). H and F are
the real and imaginary parts of such function, respectively.
The procedure is inspired on that in SIR (Ruiz Cobo & del Toro
Iniesta 1992, ApJ 398, 385). On its turn, that routine was taken
from modifications by A. Wittmann (1986) to modifications by S.K.
Solanki (1985) to an original FORTRAN routine written by J.W. Harvey
and A. Nordlund.
"""
A = [122.607931777104326, 214.382388694706425, 181.928533092181549,\
93.155580458138441, 30.180142196210589, 5.912626209773153,\
0.564189583562615]
B = [122.60793177387535, 352.730625110963558, 457.334478783897737,\
348.703917719495792, 170.354001821091472, 53.992906912940207,\
10.479857114260399,1.]
z = np.array(damp*np.ones(len(v)) + -abs(v)*1j)
Z = ((((((A[6]*z+A[5])*z+A[4])*z+A[3])*z+A[2])*z+A[1])*z+A[0])/\
(((((((z+B[6])*z+B[5])*z+B[4])*z+B[3])*z+B[2])*z+B[1])*z+B[0])
h = np.real(Z)
f = np.sign(v)*np.imag(Z)*0.5
return h, f
def lower_dict_keys(d):
out = {}
for k, v in d.items():
out[k.lower()] = v
return out
def show_tree(hdf5_file):
tree = {hdf5_file: OrderedDict()}
f = h5py.File(hdf5_file, 'r')
for k, v in f.items():
tree[hdf5_file][k] = OrderedDict()
for k2, v2 in v.items():
tree[hdf5_file][k][f'{k2} -> {v2.shape} {v2.dtype}'] = OrderedDict()
chrs = dict(
UP_AND_RIGHT=u"\u2514",
HORIZONTAL=u"\u2500",
VERTICAL=u"\u2502",
VERTICAL_AND_RIGHT=u"\u251C"
)
tr = LeftAligned(draw=BoxStyle(gfx = chrs, horiz_len=1))
print(tr(tree))
| 31.761905
| 164
| 0.591776
|
9500142e5839de59843767c91bf48c8217f319c0
| 5,206
|
py
|
Python
|
setup.py
|
eliasfernandez/django-simplecms
|
fd2d4b0c53a1262a7253e2428e5bd8a4e8074ce4
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
eliasfernandez/django-simplecms
|
fd2d4b0c53a1262a7253e2428e5bd8a4e8074ce4
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
eliasfernandez/django-simplecms
|
fd2d4b0c53a1262a7253e2428e5bd8a4e8074ce4
|
[
"BSD-2-Clause"
] | null | null | null |
# coding=utf-8
from distutils.core import setup
import codecs
import os
import sys
from distutils.util import convert_path
from fnmatch import fnmatchcase
from setuptools import setup
import cms
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ["*.py", "*.pyc", "*$py.class", "*~", ".*", "*.bak"]
standard_exclude_directories = [
".*", "CVS", "_darcs", "./build", "./dist", "EGG-INFO", "*.egg-info"
]
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
where=".",
package="",
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{"package": [files]}
Where ``files`` is a list of all the files in that package that
don"t match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won"t be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren"t
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), "", package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
                            print(
                                "Directory %s ignored by pattern %s"
                                % (fn, pattern), file=sys.stderr)
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, "__init__.py"))
and not prefix):
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
stack.append((fn, prefix + name + "/", package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
                            print(
                                "File %s ignored by pattern %s"
                                % (fn, pattern), file=sys.stderr)
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
long_description = open('README.md').read()
setup(
name='django-simplecms',
version=cms.VERSION,
packages=['cms',
'cms.context_processors',
'cms.templatetags',
'cms.utils',
],
include_package_data=True,
package_data=find_package_data('.'),
description='A Django app for cms purposes that covers 90% of tasks we need from a cms',
long_description=long_description,
author='Elías Fernández',
author_email='eliasfernandez@gmail.com',
license='BSD License',
url='http://github.com/eliasfernandez/django-simplecms',
platforms=["any"],
install_requires=[
"Django==1.11",
"pillow",
"django-mptt",
"django-tinymce",
"django-suit",
"django-filer"
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English (mostly)',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
| 36.661972
| 100
| 0.528045
|
8a9490609b406aac75fafb5cf975907e222fec60
| 5,941
|
py
|
Python
|
scripts/release_scripts/repo_specific_changes_fetcher.py
|
siddharthmehta02/oppia
|
973f777a6c5a8c3442846bda839e63856dfddf72
|
[
"Apache-2.0"
] | 4
|
2021-09-16T16:46:53.000Z
|
2022-02-06T13:00:14.000Z
|
scripts/release_scripts/repo_specific_changes_fetcher.py
|
siddharthmehta02/oppia
|
973f777a6c5a8c3442846bda839e63856dfddf72
|
[
"Apache-2.0"
] | null | null | null |
scripts/release_scripts/repo_specific_changes_fetcher.py
|
siddharthmehta02/oppia
|
973f777a6c5a8c3442846bda839e63856dfddf72
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script that provides changes specific to oppia repo to be written
to release summary file.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
from core import python_utils
from scripts import common
GIT_CMD_DIFF_NAMES_ONLY_FORMAT_STRING = 'git diff --name-only %s %s'
GIT_CMD_SHOW_FORMAT_STRING = 'git show %s:feconf.py'
VERSION_RE_FORMAT_STRING = r'%s\s*=\s*(\d+|\.)+'
FECONF_SCHEMA_VERSION_CONSTANT_NAMES = [
'CURRENT_STATE_SCHEMA_VERSION', 'CURRENT_COLLECTION_SCHEMA_VERSION']
FECONF_FILEPATH = os.path.join('', 'feconf.py')
def get_changed_schema_version_constant_names(release_tag_to_diff_against):
"""Returns a list of schema version constant names in feconf that have
changed since the release against which diff is being checked.
Args:
release_tag_to_diff_against: str. The release tag to diff against.
Returns:
list(str). List of version constant names in feconf that changed.
"""
changed_version_constants_in_feconf = []
git_show_cmd = (GIT_CMD_SHOW_FORMAT_STRING % release_tag_to_diff_against)
old_feconf = common.run_cmd(git_show_cmd.split(' '))
with python_utils.open_file(FECONF_FILEPATH, 'r') as feconf_file:
new_feconf = feconf_file.read()
for version_constant in FECONF_SCHEMA_VERSION_CONSTANT_NAMES:
old_version = re.findall(
VERSION_RE_FORMAT_STRING % version_constant, old_feconf)[0]
new_version = re.findall(
VERSION_RE_FORMAT_STRING % version_constant, new_feconf)[0]
if old_version != new_version:
changed_version_constants_in_feconf.append(version_constant)
return changed_version_constants_in_feconf
def _get_changed_filenames_since_tag(release_tag_to_diff_against):
"""Get names of changed files from git since a given release.
Args:
release_tag_to_diff_against: str. The release tag to diff against.
Returns:
list(str). List of filenames for files that have been modified since
the release against which diff is being checked.
"""
diff_cmd = (
GIT_CMD_DIFF_NAMES_ONLY_FORMAT_STRING % (
release_tag_to_diff_against, 'HEAD'))
return common.run_cmd(diff_cmd.split(' ')).splitlines()
def get_setup_scripts_changes_status(release_tag_to_diff_against):
"""Returns a dict of setup script filepaths with a status of whether
they have changed or not since the release against which diff is
being checked.
Args:
release_tag_to_diff_against: str. The release tag to diff against.
Returns:
dict. Dict consisting of key as script name and value as boolean
indicating whether or not the script is modified since the release
against which diff is being checked.
"""
setup_script_filepaths = [
'scripts/%s' % item for item in [
'setup.py', 'setup_gae.py', 'install_third_party_libs.py',
'install_third_party.py']]
changed_filenames = _get_changed_filenames_since_tag(
release_tag_to_diff_against)
changes_dict = {
script_filepath: script_filepath in changed_filenames
for script_filepath in setup_script_filepaths}
return changes_dict
def get_changed_storage_models_filenames(release_tag_to_diff_against):
"""Returns a list of filepaths in core/storage whose contents have
changed since the release against which diff is being checked.
Args:
release_tag_to_diff_against: str. The release tag to diff against.
Returns:
list(str). The changed filenames in core/storage (if any).
"""
changed_model_filenames = _get_changed_filenames_since_tag(
release_tag_to_diff_against)
return [
model_filename for model_filename in changed_model_filenames
if model_filename.startswith('core/storage')]
def get_changes(release_tag_to_diff_against):
"""Collects changes in storage models, setup scripts and feconf
since the release tag passed in arguments.
Args:
release_tag_to_diff_against: str. The release tag to diff against.
Returns:
list(str). A list of lines to be written to the release summary file.
        These lines describe the changed storage model names, setup script names
        and feconf schema version names since the release against which diff is
        being checked.
"""
changes = []
feconf_version_changes = get_changed_schema_version_constant_names(
release_tag_to_diff_against)
if feconf_version_changes:
changes.append(
'\n### Feconf version changes:\nThis indicates that a '
'migration may be needed\n\n')
for var in feconf_version_changes:
changes.append('* %s\n' % var)
setup_changes = get_setup_scripts_changes_status(
release_tag_to_diff_against)
if setup_changes:
changes.append('\n### Changed setup scripts:\n')
for var in setup_changes.keys():
changes.append('* %s\n' % var)
    storage_changes = get_changed_storage_models_filenames(
release_tag_to_diff_against)
if storage_changes:
changes.append('\n### Changed storage models:\n')
for item in storage_changes:
changes.append('* %s\n' % item)
return changes
| 37.13125
| 80
| 0.722606
|
77a9e67414d95cbbf70efc27b9197a98209483f6
| 1,001
|
py
|
Python
|
concat.py
|
leoGCoelho/Snscrape-Dataframe-Builder
|
1759cade1d349c7e7fc84b26089385d0e4df132f
|
[
"MIT"
] | 2
|
2020-12-01T14:35:21.000Z
|
2021-05-17T05:58:28.000Z
|
concat.py
|
leoGCoelho/Snscrape-Dataframe-Builder
|
1759cade1d349c7e7fc84b26089385d0e4df132f
|
[
"MIT"
] | null | null | null |
concat.py
|
leoGCoelho/Snscrape-Dataframe-Builder
|
1759cade1d349c7e7fc84b26089385d0e4df132f
|
[
"MIT"
] | 2
|
2021-02-11T16:53:01.000Z
|
2022-01-15T21:44:17.000Z
|
import pandas
import json
import sys
def cleandata(data):
for jsonf in data:
#jsonf["texto"] = ""
jsonf.pop("user", None)
jsonf.pop("outlinks", None)
jsonf.pop("tcooutlinks", None)
jsonf.pop("replyCount", None)
jsonf.pop("retweetCount", None)
jsonf.pop("likeCount", None)
jsonf.pop("quoteCount", None)
jsonf.pop("conversationId", None)
jsonf.pop("likeCount", None)
jsonf.pop("source", None)
jsonf.pop("sourceUrl", None)
jsonf.pop("media", None)
jsonf.pop("retweetedTweet", None)
jsonf.pop("quotedTweet", None)
jsonf.pop("likeCount", None)
jsonf.pop("mentionedUsers", None)
return data
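# Hypothetical usage (file names are only an example):
#   python concat.py tweets_a.json tweets_b.json merged.json
# where the first two files contain one JSON object per line (e.g. snscrape jsonl output).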
data1 = [json.loads(line) for line in open(sys.argv[1], 'r')]
data1 = cleandata(data1)
data2 = [json.loads(line) for line in open(sys.argv[2], 'r')]
data2 = cleandata(data2)
data = data1 + data2
with open(sys.argv[3], 'w') as fout:
    json.dump(data, fout)
print(len(data), len(data1), len(data2))
| 26.342105
| 61
| 0.607393
|
60bd79822b202fa9a25ff85bbb57e09aec57c957
| 983
|
py
|
Python
|
recipes/flask/__init__.py
|
lbovet/kivy-ios
|
679fc62256ffb87b6101840563fc2952d7166e1b
|
[
"MIT"
] | 2
|
2021-02-05T10:13:05.000Z
|
2021-11-12T07:31:57.000Z
|
recipes/flask/__init__.py
|
lbovet/kivy-ios
|
679fc62256ffb87b6101840563fc2952d7166e1b
|
[
"MIT"
] | 6
|
2020-01-31T18:04:48.000Z
|
2021-06-05T10:53:55.000Z
|
recipes/flask/__init__.py
|
lbovet/kivy-ios
|
679fc62256ffb87b6101840563fc2952d7166e1b
|
[
"MIT"
] | 1
|
2021-04-08T19:51:07.000Z
|
2021-04-08T19:51:07.000Z
|
# pure-python package, this can be removed when we'll support any python package
from toolchain import PythonRecipe, shprint
from os.path import join
import sh, os
class FlaskRecipe(PythonRecipe):
version = "master"
url = "https://github.com/mitsuhiko/flask/archive/{version}.zip"
depends = ["python","jinja2","werkzeug","itsdangerous","click"]
def install(self):
arch = list(self.filtered_archs)[0]
build_dir = self.get_build_dir(arch.arch)
os.chdir(build_dir)
hostpython = sh.Command(self.ctx.hostpython)
build_env = arch.get_env()
dest_dir = join(self.ctx.dist_dir, "root", "python")
build_env['PYTHONPATH'] = join(dest_dir, 'lib', 'python2.7', 'site-packages')
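        # Patch setup.py to use distutils.core instead of setuptools before installing,
        # so hostpython can install this pure-python package into the dist root.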
cmd = sh.Command("sed")
shprint(cmd, "-i", "", "s/setuptools/distutils.core/g", "./setup.py", _env=build_env)
shprint(hostpython, "setup.py", "install", "--prefix", dest_dir, _env=build_env)
recipe = FlaskRecipe()
| 39.32
| 93
| 0.660224
|
5f6249bc9801e57bbd60ec0126322e058368a7a8
| 3,473
|
py
|
Python
|
alipay/aop/api/domain/SignData.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/SignData.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/SignData.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SignData(object):
def __init__(self):
self._ori_app_id = None
self._ori_char_set = None
self._ori_out_biz_no = None
self._ori_sign = None
self._ori_sign_type = None
self._partner_id = None
@property
def ori_app_id(self):
return self._ori_app_id
@ori_app_id.setter
def ori_app_id(self, value):
self._ori_app_id = value
@property
def ori_char_set(self):
return self._ori_char_set
@ori_char_set.setter
def ori_char_set(self, value):
self._ori_char_set = value
@property
def ori_out_biz_no(self):
return self._ori_out_biz_no
@ori_out_biz_no.setter
def ori_out_biz_no(self, value):
self._ori_out_biz_no = value
@property
def ori_sign(self):
return self._ori_sign
@ori_sign.setter
def ori_sign(self, value):
self._ori_sign = value
@property
def ori_sign_type(self):
return self._ori_sign_type
@ori_sign_type.setter
def ori_sign_type(self, value):
self._ori_sign_type = value
@property
def partner_id(self):
return self._partner_id
@partner_id.setter
def partner_id(self, value):
self._partner_id = value
def to_alipay_dict(self):
params = dict()
if self.ori_app_id:
if hasattr(self.ori_app_id, 'to_alipay_dict'):
params['ori_app_id'] = self.ori_app_id.to_alipay_dict()
else:
params['ori_app_id'] = self.ori_app_id
if self.ori_char_set:
if hasattr(self.ori_char_set, 'to_alipay_dict'):
params['ori_char_set'] = self.ori_char_set.to_alipay_dict()
else:
params['ori_char_set'] = self.ori_char_set
if self.ori_out_biz_no:
if hasattr(self.ori_out_biz_no, 'to_alipay_dict'):
params['ori_out_biz_no'] = self.ori_out_biz_no.to_alipay_dict()
else:
params['ori_out_biz_no'] = self.ori_out_biz_no
if self.ori_sign:
if hasattr(self.ori_sign, 'to_alipay_dict'):
params['ori_sign'] = self.ori_sign.to_alipay_dict()
else:
params['ori_sign'] = self.ori_sign
if self.ori_sign_type:
if hasattr(self.ori_sign_type, 'to_alipay_dict'):
params['ori_sign_type'] = self.ori_sign_type.to_alipay_dict()
else:
params['ori_sign_type'] = self.ori_sign_type
if self.partner_id:
if hasattr(self.partner_id, 'to_alipay_dict'):
params['partner_id'] = self.partner_id.to_alipay_dict()
else:
params['partner_id'] = self.partner_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SignData()
if 'ori_app_id' in d:
o.ori_app_id = d['ori_app_id']
if 'ori_char_set' in d:
o.ori_char_set = d['ori_char_set']
if 'ori_out_biz_no' in d:
o.ori_out_biz_no = d['ori_out_biz_no']
if 'ori_sign' in d:
o.ori_sign = d['ori_sign']
if 'ori_sign_type' in d:
o.ori_sign_type = d['ori_sign_type']
if 'partner_id' in d:
o.partner_id = d['partner_id']
return o
| 29.939655
| 79
| 0.596314
|
dca8b2877b3a8eb2abab7015fcfe8fb8c1ae32fb
| 3,158
|
py
|
Python
|
paper_figures/CIDU2012/fig_rbf_ridge_mu_z.py
|
aragilar/astroML
|
d3f6279eb632957662338761cb559a1dcd541fb0
|
[
"BSD-2-Clause"
] | 6
|
2019-08-31T16:43:43.000Z
|
2021-07-10T06:06:20.000Z
|
paper_figures/CIDU2012/fig_rbf_ridge_mu_z.py
|
aragilar/astroML
|
d3f6279eb632957662338761cb559a1dcd541fb0
|
[
"BSD-2-Clause"
] | 34
|
2018-09-10T22:35:07.000Z
|
2022-02-08T21:17:39.000Z
|
paper_figures/CIDU2012/fig_rbf_ridge_mu_z.py
|
aragilar/astroML
|
d3f6279eb632957662338761cb559a1dcd541fb0
|
[
"BSD-2-Clause"
] | 10
|
2017-06-22T09:21:19.000Z
|
2020-01-26T03:54:26.000Z
|
"""
Regularized Regression Example
------------------------------
This performs regularized regression on a gaussian basis function model.
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import lognorm
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from astroML.cosmology import Cosmology
from astroML.datasets import generate_mu_z
from astroML.density_estimation import FunctionDistribution
#----------------------------------------------------------------------
# generate data
np.random.seed(0)
z_sample, mu_sample, dmu = generate_mu_z(100, random_state=0)
cosmo = Cosmology()
z = np.linspace(0.01, 2, 1000)
mu = np.asarray(list(map(cosmo.mu, z)))
#------------------------------------------------------------
# Manually convert data to a gaussian basis
# note that we're ignoring errors here, for the sake of example.
def gaussian_basis(x, mu, sigma):
return np.exp(-0.5 * ((x - mu) / sigma) ** 2)
centers = np.linspace(0, 1.8, 100)
widths = 0.2
X = gaussian_basis(z_sample[:, np.newaxis], centers, widths)
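# each column of X is one gaussian basis function evaluated at the sample redshifts,
# so X has shape (n_samples, n_centers) = (100, 100)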
#------------------------------------------------------------
# Set up the figure to plot the results
fig = plt.figure(figsize=(12, 7))
fig.subplots_adjust(left=0.07, right=0.95,
bottom=0.08, top=0.95,
hspace=0.1, wspace=0.15)
classifier = [LinearRegression, Ridge, Lasso]
kwargs = [dict(), dict(alpha=0.005), dict(alpha=0.001)]
labels = ['Linear Regression', 'Ridge Regression', 'Lasso Regression']
for i in range(3):
clf = classifier[i](fit_intercept=True, **kwargs[i])
clf.fit(X, mu_sample)
w = clf.coef_
fit = clf.predict(gaussian_basis(z[:, None], centers, widths))
# plot fit
ax = fig.add_subplot(231 + i)
ax.xaxis.set_major_formatter(plt.NullFormatter())
# plot curves for regularized fits
if i == 0:
        ax.set_ylabel(r'$\mu$')
else:
ax.yaxis.set_major_formatter(plt.NullFormatter())
curves = 37 + w * gaussian_basis(z[:, np.newaxis], centers, widths)
curves = curves[:, abs(w) > 0.01]
ax.plot(z, curves,
c='gray', lw=1, alpha=0.5)
ax.plot(z, fit, '-k')
ax.plot(z, mu, '--', c='gray')
ax.errorbar(z_sample, mu_sample, dmu, fmt='.k', ecolor='gray', lw=1)
ax.set_xlim(0.001, 1.8)
ax.set_ylim(36, 48)
ax.text(0.05, 0.95, labels[i],
ha='left', va='top',
transform=ax.transAxes)
# plot weights
ax = plt.subplot(234 + i)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.5))
ax.set_xlabel('z')
if i == 0:
ax.set_ylabel(r'$\theta$')
w *= 1E-12
ax.text(0, 1, r'$\rm \times 10^{12}$',
transform=ax.transAxes, fontsize=16)
ax.scatter(centers, w, s=9, lw=0, c='k')
ax.set_xlim(-0.05, 1.8)
if i == 1:
ax.set_ylim(-2, 4)
elif i == 2:
ax.set_ylim(-0.5, 2)
ax.text(0.05, 0.95, labels[i],
ha='left', va='top',
transform=ax.transAxes)
plt.show()
| 30.660194
| 75
| 0.586764
|
699eb50da7f46d79f3feabd34ccbbab3ed2f4cc9
| 2,325
|
py
|
Python
|
pycspr/serialisation/byte_array/encoder/cl_value.py
|
MrKrautee/casper-python-sdk
|
13e9da83de8892583da7ada91e4231e6f85e5e13
|
[
"Apache-2.0"
] | 2
|
2021-10-05T07:50:20.000Z
|
2021-10-06T09:58:19.000Z
|
pycspr/serialisation/byte_array/encoder/cl_value.py
|
MrKrautee/casper-python-sdk
|
13e9da83de8892583da7ada91e4231e6f85e5e13
|
[
"Apache-2.0"
] | null | null | null |
pycspr/serialisation/byte_array/encoder/cl_value.py
|
MrKrautee/casper-python-sdk
|
13e9da83de8892583da7ada91e4231e6f85e5e13
|
[
"Apache-2.0"
] | null | null | null |
import typing
import pycspr.serialisation.byte_array.encoder.cl_complex as complex_encoder
import pycspr.serialisation.byte_array.encoder.cl_primitive as primitives_encoder
import pycspr.serialisation.byte_array.encoder.cl_type as type_encoder
from pycspr.types import CLTypeKey
from pycspr.types import CLValue
# Map: CL type <-> encoder.
ENCODERS = {
CLTypeKey.ANY: complex_encoder.encode_any,
CLTypeKey.BOOL: primitives_encoder.encode_bool,
CLTypeKey.BYTE_ARRAY: primitives_encoder.encode_byte_array,
CLTypeKey.I32: primitives_encoder.encode_i32,
CLTypeKey.I64: primitives_encoder.encode_i64,
CLTypeKey.KEY: complex_encoder.encode_storage_key,
CLTypeKey.LIST: complex_encoder.encode_list,
CLTypeKey.MAP: complex_encoder.encode_map,
CLTypeKey.OPTION: complex_encoder.encode_option,
CLTypeKey.PUBLIC_KEY: complex_encoder.encode_public_key,
CLTypeKey.STRING: primitives_encoder.encode_string,
CLTypeKey.TUPLE_1: complex_encoder.encode_tuple1,
CLTypeKey.TUPLE_2: complex_encoder.encode_tuple2,
CLTypeKey.TUPLE_3: complex_encoder.encode_tuple3,
CLTypeKey.U8: primitives_encoder.encode_u8,
CLTypeKey.U32: primitives_encoder.encode_u32,
CLTypeKey.U64: primitives_encoder.encode_u64,
CLTypeKey.U128: primitives_encoder.encode_u128,
CLTypeKey.U256: primitives_encoder.encode_u256,
CLTypeKey.U512: primitives_encoder.encode_u512,
CLTypeKey.UNIT: primitives_encoder.encode_unit,
CLTypeKey.RESULT: complex_encoder.encode_result,
CLTypeKey.UREF: complex_encoder.encode_uref,
}
def encode(value: CLValue) -> bytes:
"""Encodes a CL value as an array of bytes.
    :param value: A CL value that encapsulates both the associated CL type & its pythonic value representation.
:returns: A byte array representation conformant to CL serialisation protocol.
"""
encoder = ENCODERS[value.cl_type.typeof]
if value.cl_type.typeof in {CLTypeKey.LIST, CLTypeKey.OPTION}:
return encoder(value.parsed, ENCODERS[value.cl_type.inner_type.typeof])
else:
return encoder(value.parsed)
def encode_cl_value(entity: CLValue) -> bytes:
"""Encodes a CL value.
"""
return primitives_encoder.encode_u8_array(encode(entity)) + \
type_encoder.encode_cl_type(entity.cl_type)
| 39.40678
| 112
| 0.772473
|
3cf22817c517c6f885704c8ec9c12fa1d0fc1eb7
| 6,735
|
py
|
Python
|
tools/debug/common.py
|
gnomonsis/nncf_pytorch
|
9fc4a92b5cb1b2c240e633c4ffa69b4fae1917fb
|
[
"Apache-2.0"
] | null | null | null |
tools/debug/common.py
|
gnomonsis/nncf_pytorch
|
9fc4a92b5cb1b2c240e633c4ffa69b4fae1917fb
|
[
"Apache-2.0"
] | 4
|
2020-07-17T11:12:35.000Z
|
2021-12-15T15:20:24.000Z
|
tools/debug/common.py
|
gnomonsis/nncf_pytorch
|
9fc4a92b5cb1b2c240e633c4ffa69b4fae1917fb
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import os
import torch
from functools import partial
from torch import nn
from examples.common.model_loader import load_model
from examples.common.utils import print_statistics
from nncf.checkpoint_loading import load_state
from nncf.layers import NNCFConv2d, NNCFLinear
from nncf.model_creation import create_compressed_model
def dump_in_out_hook(module, inputs, output):
dump_out_hook(module, inputs, output)
dump_path = get_dump_path(module)
if dump_path:
key = 0
output_dir = os.path.abspath(os.path.join(dump_path, os.pardir))
file_name = os.path.basename(dump_path)
for input_ in inputs:
key += 1
input_data = input_.data.cpu().numpy().flatten()
dump_name = '.'.join([file_name, "in", str(key)])
npy_path, _ = save_dump(dump_name, output_dir, input_data)
add_full_dump_path(module, npy_path)
def dump_out_hook(module, inputs, output):
dump_path = get_dump_path(module)
if dump_path:
output_data = output.data.cpu().numpy().flatten()
output_dir = os.path.abspath(os.path.join(dump_path, os.pardir))
file_name = os.path.basename(dump_path)
dump_name = '.'.join([file_name, "out"])
npy_path, _ = save_dump(dump_name, output_dir, output_data, force=False)
add_full_dump_path(module, npy_path)
def get_dump_path(module):
if hasattr(module, "dump_path"):
return module.dump_path
return None
def set_dump_path(layer, path):
layer.dump_path = path
def add_full_dump_path(layer, full_path):
if not hasattr(layer, 'dump_full_paths'):
layer.dump_full_paths = []
layer.dump_full_paths.append(full_path)
def get_full_dump_paths(layer):
if hasattr(layer, 'dump_full_paths'):
return layer.dump_full_paths
return None
def is_weightable(layer):
return isinstance(layer, (nn.Conv2d, nn.Linear)) and \
not isinstance(layer, (NNCFConv2d, NNCFLinear))
def has_sparse_quant_weights(layer, name):
from nncf.quantization.layers import SymmetricQuantizer
from nncf.sparsity.rb.layers import RBSparsifyingWeight
return (isinstance(layer, RBSparsifyingWeight) and ('sparsified_weight' in name)) or \
(isinstance(layer, SymmetricQuantizer) and ('quantized_weight' in name))
def save_dump_(path, ext, saver, data, force=False):
full_path = '.'.join([path, ext])
if not os.path.exists(full_path) or force:
print("Saving dump to {}".format(full_path))
saver(full_path, data)
else:
print("Dump already exists " + full_path)
return full_path
def save_dump(dump_name, output_dir, data, force=False):
path = os.path.join(output_dir, dump_name)
npy_path = save_dump_(path, "npy", np.save, data, force)
txt_path = save_dump_(path, "txt", partial(np.savetxt, fmt="%s"), data, force)
return npy_path, txt_path
def register_print_hooks(path, model, data_to_compare, num_layers=-1, dump_activations=False, prefix='', idx=0):
for name, children in model.named_children():
name_full = "{}{}".format(prefix, name)
idx = register_print_hooks(path, children, data_to_compare, num_layers, dump_activations,
prefix=name_full + ".", idx=idx)
within_range = (num_layers == -1) or idx < num_layers
has_weights = has_sparse_quant_weights(children, name_full) or is_weightable(children)
within_type = has_weights if not dump_activations else dump_activations
if within_range and within_type:
# always there for activations if dump_activation is enabled
# always there for weights if dump_activation is disabled
name_full = name_full.replace('/', '_')
dump_path = os.path.join(path, '.'.join([str(idx), name_full]))
idx += 1
if is_weightable(children):
output_dir = os.path.abspath(os.path.join(dump_path, os.pardir))
file_name = os.path.basename(dump_path)
def dump_attr(attr):
                    attr_val = getattr(children, attr, None)
                    if attr_val is not None:
                        dump_name = '.'.join([file_name, attr])
                        data = attr_val.data.numpy()
save_dump(dump_name, output_dir, data, force=False)
data_to_compare[dump_name] = data
dump_attr('weight')
dump_attr('bias')
else:
set_dump_path(children, dump_path)
hook = dump_in_out_hook if dump_activations else dump_out_hook
children.register_forward_hook(hook)
return idx
def load_torch_model(config, cuda=False):
weights = config.get('weights')
model = load_model(config.get('model'),
pretrained=config.get('pretrained', True) if weights is None else False,
num_classes=config.get('num_classes', 1000),
model_params=config.get('model_params', {}))
compression_ctrl, model = create_compressed_model(model, config)
if weights:
sd = torch.load(weights, map_location='cpu')
load_state(model, sd)
if cuda:
model = model.cuda()
model = torch.nn.DataParallel(model)
print_statistics(compression_ctrl.statistics())
return model
def compare_activations(ir_dump_txt, torch_dump_npy):
with open(ir_dump_txt, 'r') as fin:
first_line = fin.readline()
if "shape:" in first_line:
data = fin.read().splitlines(True)
with open(ir_dump_txt, 'w') as fout:
fout.writelines(data)
ie = np.loadtxt(ir_dump_txt, dtype=np.float32)
pt = np.load(torch_dump_npy)
print("Size, [ MIN, MAX ]")
print_info = lambda np_array: print(
"{} [{:.3f}, {:.3f}]".format(np_array.size, np_array.min().item(), np_array.max().item()))
print_info(ie)
print_info(pt)
print("Maximum of absolute difference: {:.7f}".format(abs(ie - pt).max()))
def print_args(args):
for arg in sorted(vars(args)):
print("{: <27s}: {}".format(arg, getattr(args, arg)))
| 38.267045
| 112
| 0.659688
|
3108988ec48dd8c6366194c264142f6616003fc3
| 7,035
|
py
|
Python
|
thola_nautobot/forms.py
|
inexio/thola-nautobot
|
0b152784c9c7c0562423595251960a9e102ba221
|
[
"BSD-2-Clause"
] | 5
|
2021-07-13T12:07:39.000Z
|
2021-07-23T08:18:46.000Z
|
thola_nautobot/forms.py
|
inexio/thola-nautobot
|
0b152784c9c7c0562423595251960a9e102ba221
|
[
"BSD-2-Clause"
] | 2
|
2021-07-06T11:04:17.000Z
|
2021-08-24T02:54:08.000Z
|
thola_nautobot/forms.py
|
inexio/thola-nautobot
|
0b152784c9c7c0562423595251960a9e102ba221
|
[
"BSD-2-Clause"
] | null | null | null |
"""Forms for thola nautobot."""
import django_rq
from django import forms
from django.conf import settings
from nautobot.dcim.models import Device, Site, DeviceRole
from thola_nautobot.choices import TholaOnboardingStatusChoice
from thola_nautobot.models import TholaConfig, TholaOnboarding
from thola_nautobot.thola.client import thola_read_available_components
from thola_nautobot.thola.snmp_config import SNMPConfig
PLUGIN_SETTINGS = settings.PLUGINS_CONFIG["thola_nautobot"]
class TholaConfigForm(forms.ModelForm):
"""Form for creating a new Thola Config instance."""
device = forms.ModelChoiceField(
queryset=Device.objects.all(),
required=True,
help_text="Device to monitor with Thola."
)
snmp_community = forms.CharField(
required=False,
label="SNMP community",
help_text="Community string for SNMP to use. (def: " + str(PLUGIN_SETTINGS["snmp_community"]) + ")"
)
snmp_version = forms.CharField(
required=False,
label="SNMP version",
help_text="SNMP version to use. (def: " + str(PLUGIN_SETTINGS["snmp_version"]) + ")"
)
snmp_port = forms.IntegerField(
required=False,
label="SNMP port",
help_text="Port for SNMP to use. (def: " + str(PLUGIN_SETTINGS["snmp_port"]) + ")"
)
snmp_discover_par_requests = forms.IntegerField(
required=False,
label="SNMP discover par requests",
help_text="The amount of parallel connection requests used while trying to get a valid SNMP connection. "
"(def: " + str(PLUGIN_SETTINGS["snmp_discover_par_requests"]) + ")"
)
snmp_discover_retries = forms.IntegerField(
required=False,
label="SNMP discover retries",
help_text="The retries used while trying to get a valid SNMP connection. (def: " + str(
PLUGIN_SETTINGS["snmp_discover_retries"]) + ")"
)
snmp_discover_timeout = forms.IntegerField(
required=False,
label="SNMP discover timeout",
help_text="The timeout in seconds used while trying to get a valid SNMP connection. (def: " + str(
PLUGIN_SETTINGS["snmp_discover_timeout"]) + ")"
)
http_password = forms.CharField(
required=False,
label="HTTP password",
help_text="Password for HTTP/HTTPS authentication."
)
http_port = forms.IntegerField(
required=False,
label="HTTP port",
help_text="Port for HTTP to use."
)
http_username = forms.CharField(
required=False,
label="HTTP username",
help_text="Username for HTTP/HTTPS authentication."
)
https_port = forms.IntegerField(
required=False,
label="HTTPS port",
help_text="Port for HTTPS to use."
)
class Meta:
model = TholaConfig
fields = [
"device",
"snmp_community",
"snmp_version",
"snmp_port",
"snmp_discover_par_requests",
"snmp_discover_retries",
"snmp_discover_timeout",
"http_password",
"http_port",
"http_username",
"https_port"
]
def save(self, commit=True, **kwargs):
"""Save the model and the associated components."""
model = super().save(commit=False)
snmp_config = SNMPConfig(model.snmp_community, model.snmp_version, model.snmp_port, model.snmp_discover_retries,
model.snmp_discover_timeout, model.snmp_discover_par_requests)
components = thola_read_available_components(snmp_config, model.device.primary_ip4)
if components.get('error'):
raise RuntimeError(components.get('error'))
model.interfaces = "interfaces" in components.get('available_components')
model.cpu = "cpu" in components.get('available_components')
model.memory = "memory" in components.get('available_components')
model.disk = "disk" in components.get('available_components')
model.hardware_health = "hardware_health" in components.get('available_components')
model.ups = "ups" in components.get('available_components')
model.server = "server" in components.get('available_components')
model.save()
return model
class TholaOnboardingForm(forms.ModelForm):
"""Form for creating a new Thola Onboarding task."""
ip = forms.CharField(
required=True,
label="IP address",
help_text="IP address of the device."
)
site = forms.ModelChoiceField(
queryset=Site.objects.all(),
required=True,
help_text="Site of the device."
)
role = forms.ModelChoiceField(
queryset=DeviceRole.objects.all(),
required=True,
help_text="Role of the device."
)
snmp_community = forms.CharField(
required=False,
label="SNMP community",
help_text="Community string for SNMP to use. (def: " + str(PLUGIN_SETTINGS["snmp_community"]) + ")"
)
snmp_version = forms.CharField(
required=False,
label="SNMP version",
help_text="SNMP version to use. (def: " + str(PLUGIN_SETTINGS["snmp_version"]) + ")"
)
snmp_port = forms.IntegerField(
required=False,
label="SNMP port",
help_text="Port for SNMP to use. (def: " + str(PLUGIN_SETTINGS["snmp_port"]) + ")"
)
snmp_discover_par_requests = forms.IntegerField(
required=False,
label="SNMP discover par requests",
help_text="The amount of parallel connection requests used while trying to get a valid SNMP connection. "
"(def: " + str(PLUGIN_SETTINGS["snmp_discover_par_requests"]) + ")"
)
snmp_discover_retries = forms.IntegerField(
required=False,
label="SNMP discover retries",
help_text="The retries used while trying to get a valid SNMP connection. (def: " + str(
PLUGIN_SETTINGS["snmp_discover_retries"]) + ")"
)
snmp_discover_timeout = forms.IntegerField(
required=False,
label="SNMP discover timeout",
help_text="The timeout in seconds used while trying to get a valid SNMP connection. (def: " + str(
PLUGIN_SETTINGS["snmp_discover_timeout"]) + ")"
)
class Meta:
model = TholaOnboarding
fields = [
"ip",
"site",
"role",
"snmp_community",
"snmp_version",
"snmp_port",
"snmp_discover_par_requests",
"snmp_discover_retries",
"snmp_discover_timeout"
]
def save(self, commit=True, **kwargs):
"""Save the model and the associated components."""
model = super().save(commit=False)
model.status = TholaOnboardingStatusChoice.STATUS_PENDING
model.save()
queue = django_rq.get_queue('default')
queue.enqueue("thola_nautobot.worker.onboard_device", onboarding=model)
return model
| 33.822115
| 120
| 0.634968
|
03051b0be023643784d9e41f4aae7905620c3fb0
| 965
|
py
|
Python
|
src/my_happy_modin/data_management/functions/default_methods/str_default.py
|
ggservice007/my-happy-modin
|
ab293ecfa04516a5c9f76284e09b45cdd7588186
|
[
"Apache-2.0"
] | null | null | null |
src/my_happy_modin/data_management/functions/default_methods/str_default.py
|
ggservice007/my-happy-modin
|
ab293ecfa04516a5c9f76284e09b45cdd7588186
|
[
"Apache-2.0"
] | 2
|
2021-01-27T11:25:26.000Z
|
2021-01-27T12:47:53.000Z
|
src/my_happy_modin/data_management/functions/default_methods/str_default.py
|
ggservice007/my-happy-modin
|
ab293ecfa04516a5c9f76284e09b45cdd7588186
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to my_happy_modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The my_happy_modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from .series_default import SeriesDefault
class StrDefault(SeriesDefault):
@classmethod
def frame_wrapper(cls, df):
return df.squeeze(axis=1).str
| 45.952381
| 95
| 0.779275
|
6d39733c626d6ae40bd7089dc79580e67e5772f7
| 2,814
|
py
|
Python
|
yardstick/benchmark/scenarios/parser/parser.py
|
kkltcjk/kklt
|
5388eb439616a442dde496ef77ba6b71169369e0
|
[
"Apache-2.0"
] | null | null | null |
yardstick/benchmark/scenarios/parser/parser.py
|
kkltcjk/kklt
|
5388eb439616a442dde496ef77ba6b71169369e0
|
[
"Apache-2.0"
] | null | null | null |
yardstick/benchmark/scenarios/parser/parser.py
|
kkltcjk/kklt
|
5388eb439616a442dde496ef77ba6b71169369e0
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import print_function
from __future__ import absolute_import
import pkg_resources
import logging
import subprocess
from yardstick.benchmark.scenarios import base
LOG = logging.getLogger(__name__)
class Parser(base.Scenario):
"""running Parser Yang-to-Tosca module as a tool
validating output against expected outcome
more info https://wiki.opnfv.org/parser
"""
__scenario_type__ = "Parser"
SETUP_SCRIPT = "parser_setup.sh"
TEARDOWN_SCRIPT = "parser_teardown.sh"
PARSER_SCRIPT = "parser.sh"
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.setup_done = False
def setup(self):
"""scenario setup"""
self.setup_script = pkg_resources.resource_filename(
"yardstick.benchmark.scenarios.parser",
Parser.SETUP_SCRIPT)
cmd = "%s" % (self.setup_script)
subprocess.call(cmd, shell=True)
self.setup_done = True
def run(self, result):
"""execute the translation"""
options = self.scenario_cfg['options']
yangfile = options.get("yangfile", '~/yardstick/samples/yang.yaml')
toscafile = options.get("toscafile", '~/yardstick/samples/tosca.yaml')
self.parser_script = pkg_resources.resource_filename(
"yardstick.benchmark.scenarios.parser",
Parser.PARSER_SCRIPT)
if not self.setup_done:
self.setup()
cmd1 = "%s %s %s" % (self.parser_script, yangfile, toscafile)
cmd2 = "chmod 777 %s" % (self.parser_script)
subprocess.call(cmd2, shell=True)
p = subprocess.Popen(cmd1, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.communicate()
print("yangtotosca finished")
result['yangtotosca'] = "success" if p.returncode == 0 else "fail"
def teardown(self):
''' for scenario teardown remove parser and pyang '''
self.teardown_script = pkg_resources.resource_filename(
"yardstick.benchmark.scenarios.parser",
Parser.TEARDOWN_SCRIPT)
subprocess.call(self.teardown_script, shell=True)
self.teardown_done = True
def _test():
'''internal test function'''
pass
if __name__ == '__main__':
_test()
| 32.72093
| 78
| 0.629353
|
8a47b6e5b5496c80fff7880ca098034da5ca4928
| 7,471
|
py
|
Python
|
week1/utilities/ltr_utils.py
|
gsingers/search_with_machine_learning_course
|
94622c24eb07e52793cf5e2289f2f69359bb87af
|
[
"Apache-2.0"
] | 11
|
2021-12-30T18:55:56.000Z
|
2022-03-15T17:33:10.000Z
|
week1/utilities/ltr_utils.py
|
gsingers/search_with_machine_learning_course
|
94622c24eb07e52793cf5e2289f2f69359bb87af
|
[
"Apache-2.0"
] | 9
|
2021-12-31T05:24:33.000Z
|
2022-03-08T07:44:45.000Z
|
week1/utilities/ltr_utils.py
|
gsingers/search_with_machine_learning_course
|
94622c24eb07e52793cf5e2289f2f69359bb87af
|
[
"Apache-2.0"
] | 127
|
2021-12-24T17:03:26.000Z
|
2022-03-13T17:20:21.000Z
|
import json
import requests
def create_rescore_ltr_query(user_query, query_obj, click_prior_query, ltr_model_name, ltr_store_name,
active_features=None, rescore_size=500, main_query_weight=1, rescore_query_weight=2):
# Create the base query, use a much bigger window
#add on the rescore
print("IMPLEMENT ME: create_rescore_ltr_query")
return query_obj
# take an existing query and add in an SLTR so we can use it for explains to see how much SLTR contributes
def create_sltr_simple_query(user_query, query_obj, click_prior_query, ltr_model_name, ltr_store_name, active_features=None):
# Create the base query, use a much bigger window
#add on the rescore
sltr = {
"sltr": {
"params": {
"keywords": user_query,
"click_prior_query": click_prior_query
},
"model": ltr_model_name,
# Since we are using a named store, as opposed to simply '_ltr', we need to pass it in
"store": ltr_store_name,
}
}
if active_features is not None and len(active_features) > 0:
sltr["active_features"] = active_features
query_obj["query"]["bool"]["should"].append(sltr)
return query_obj, len(query_obj["query"]["bool"]["should"])
def create_sltr_hand_tuned_query(user_query, query_obj, click_prior_query, ltr_model_name, ltr_store_name, active_features=None):
# Create the base query, use a much bigger window
#add on the rescore
sltr = {
"sltr": {
"params": {
"keywords": user_query,
"click_prior_query": click_prior_query
},
"model": ltr_model_name,
# Since we are using a named store, as opposed to simply '_ltr', we need to pass it in
"store": ltr_store_name,
}
}
if active_features is not None and len(active_features) > 0:
sltr["active_features"] = active_features
query_obj["query"]["function_score"]["query"]["bool"]["should"].append(sltr)
return query_obj, len(query_obj["query"]["function_score"]["query"]["bool"]["should"])
def create_feature_log_query(query, doc_ids, click_prior_query, featureset_name, ltr_store_name, size=200, terms_field="_id"):
print("IMPLEMENT ME: create_feature_log_query")
return None
# Item is a Pandas namedtuple
def get_features(item, exclusions, col_names):
features = {}
for idx, f in enumerate(item):
col_name = col_names[idx]
if col_name not in exclusions:
# add it to the features
# Do we also have a normalized version? If so, skip this one, else add.
# if we do have a normalized one, add it, but name it w/o the norm here so that it matches our featureset in LTR
# there probably is a better way of doing this ^^
normed = "%s_norm" % col_name
if normed not in col_names:
features[col_name.replace('_norm', '')] = f
return features
def to_xgb_format(qid, doc_id, rank, query_str, product_name, grade, features):
if features is not None:
featuresAsStrs = ["%s:%.4f" % (idx + 1, feature) for idx, feature in enumerate(features.values())]
else:
featuresAsStrs = ""
comment = "# %s\t%s\t%s\t%s" % (doc_id, rank, query_str, product_name)
return "%.4f\tqid:%s\t%s %s" % (grade, qid, "\t".join(featuresAsStrs), comment.replace('\n',''))
def write_training_file(train_data, output_file, feat_map):
print("Writing XGB Training file to %s" % (output_file))
col_names = train_data.keys()
# We don't want to write everything out, some items we've been tracking are reserved or not needed for the model
exclusions = {"query_id", "doc_id", "rank", "query", "sku", "product_name", "grade", "clicks", "num_impressions"}
with open(output_file, 'bw') as output:
for item in train_data.itertuples(index=False): # skip the first 'index' element from the DF
# Pull out the specific items from the Pandas named tuple. The rest goes in the features map.
# if there is a norm version, take that
#
features = get_features(item, exclusions, col_names)
xgb_format = to_xgb_format(item.query_id, item.doc_id, item.rank, item.query,
item.product_name, item.grade, features)
output.write(bytes(xgb_format + "\n", 'utf-8'))
# We need to write out the feature map, probably more needed here
if feat_map:
print("Writing feature map to %s" % feat_map)
with open(feat_map, 'w') as feat_map_file:
item = train_data.iloc[1:2]
features = get_features(item, exclusions, col_names)
feat_map_file.write("0\tna\tq\n")
for idx, feat in enumerate(features.keys()):
#https://docs.rs/xgboost/0.1.4/xgboost/struct.FeatureMap.html are the only docs I can find on the format
if feat != "onSale":
                    feat_map_file.write('{}\t{}\tq\n'.format(idx+1,feat))  # idx+1 because feature ids are one-based (0 is the qid line above)
                else:  # Kludgy handling of onSale for now: write it out as 'q'
                    # Bug in LTR prevents 'indicator'/boolean features, so model as q for now by
                    # encoding onSale as a percentage discount
                    feat_map_file.write('{}\t{}\tq\n'.format(idx+1,feat))  # switch this 'q' to 'i' once indicator features work
def write_opensearch_ltr_model(model_name, model, model_file, objective="rank:pairwise"):
model_str = '[' + ','.join(list(model)) + ']'
#print(model_str)
os_model = {
"model": {
"name": model_name,
"model": {
"type": "model/xgboost+json",
"definition": '{"objective":"%s", "splits": %s}' % (objective, model_str)
}
}
}
print("Saving XGB LTR-ready model to %s.ltr" % model_file)
with open("%s.ltr" % model_file, 'w') as ltr_model:
ltr_model.write(json.dumps(os_model))
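# Illustrative sketch (assumption, hypothetical names and paths): write_opensearch_ltr_model
# wraps an XGBoost JSON dump in the envelope the OpenSearch LTR plugin expects for a
# "model/xgboost+json" model definition.
def _example_write_opensearch_ltr_model():
    fake_booster_dump = ['{"nodeid": 0, "leaf": 0.5}']  # stand-in for an XGBoost JSON dump
    write_opensearch_ltr_model("ltr_demo_model", fake_booster_dump, "/tmp/ltr_demo_model")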
def create_ltr_store(ltr_model_path, auth, delete_old=True):
if delete_old:
resp = requests.delete(ltr_model_path, auth=auth, verify=False)
print("Deleted old store response status: %s" % resp.status_code)
# Create our new LTR storage
resp = requests.put(ltr_model_path, auth=auth, verify=False)
print("Create the new store at %s response status: %s" % (ltr_model_path, resp.status_code))
return resp
def post_featureset(featureset_path, ltr_feature_set, auth, headers={"Content-Type": 'application/json'}):
print("POSTing the featureset to %s" % (featureset_path))
resp = requests.post(featureset_path, headers=headers, data=json.dumps(ltr_feature_set), auth=auth, verify=False)
return resp
def delete_model(model_path, auth):
print("Deleting model from %s" % model_path)
response = requests.delete(model_path, auth=auth, verify=False)
print("\tDelete Model Response: %s: %s" % (response.status_code, response.text))
return response
def upload_model(model_path, os_model, auth):
print("Uploading model to %s" % model_path)
headers = {"Content-Type": 'application/json'}
response = requests.post(model_path, data=json.dumps(os_model), headers=headers, auth=auth, verify=False)
print("\tUpload Model Response: %s: %s" % (response.status_code, response.text))
return response
| 46.987421
| 129
| 0.641146
|
37bda1b97752813dbac9e4cab4d4b1cae1d2bb1e
| 721
|
py
|
Python
|
detectron2_ofa/modeling/meta_arch/build.py
|
qdmy/Adelaidet-Quantization
|
e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b
|
[
"Apache-2.0"
] | null | null | null |
detectron2_ofa/modeling/meta_arch/build.py
|
qdmy/Adelaidet-Quantization
|
e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b
|
[
"Apache-2.0"
] | null | null | null |
detectron2_ofa/modeling/meta_arch/build.py
|
qdmy/Adelaidet-Quantization
|
e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2_ofa.utils.registry import Registry
from third_party.quantization.convert_to_quantization import convert2quantization
META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip
META_ARCH_REGISTRY.__doc__ = """
Registry for meta-architectures, i.e. the whole model.
The registered object will be called with `obj(cfg)`
and expected to return a `nn.Module` object.
"""
def build_model(cfg):
"""
    Build the whole model defined by `cfg.MODEL.META_ARCHITECTURE`.
"""
meta_arch = cfg.MODEL.META_ARCHITECTURE
model = META_ARCH_REGISTRY.get(meta_arch)(cfg)
convert2quantization(model, cfg)
return model
| 32.772727
| 81
| 0.762829
|
ae3c23e8ad133f678ac0c9def2a065bb82465e49
| 2,229
|
py
|
Python
|
merlin/io/shuffle.py
|
mikemckiernan/core
|
7621a8c4596a9f002d846d4618736fcf2c606419
|
[
"Apache-2.0"
] | 1
|
2022-03-28T17:22:54.000Z
|
2022-03-28T17:22:54.000Z
|
merlin/io/shuffle.py
|
NVIDIA-Merlin/core
|
78ecdddf83735fa3041e0778d6fdac37c3c1dd92
|
[
"Apache-2.0"
] | 27
|
2022-02-17T18:18:05.000Z
|
2022-03-31T23:44:08.000Z
|
merlin/io/shuffle.py
|
mikemckiernan/core
|
7621a8c4596a9f002d846d4618736fcf2c606419
|
[
"Apache-2.0"
] | 5
|
2022-02-17T17:46:41.000Z
|
2022-03-11T03:23:28.000Z
|
#
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import enum
import warnings
import pandas as pd
from packaging.version import Version
_IGNORE_INDEX_SUPPORTED = Version(pd.__version__) >= Version("1.3.0")
try:
import cudf
_CUDF_IGNORE_INDEX_SUPPORTED = Version(cudf.__version__) >= Version("22.04.0")
except ImportError:
_CUDF_IGNORE_INDEX_SUPPORTED = None
class Shuffle(enum.Enum):
PER_PARTITION = 0
PER_WORKER = 1
FULL = 2
#
# Helper Function definitions
#
def _check_shuffle_arg(shuffle):
if shuffle is None:
return shuffle
if isinstance(shuffle, Shuffle):
if shuffle == Shuffle.FULL:
raise ValueError('`shuffle="full"` is not yet supported.')
elif shuffle is True:
shuffle = Shuffle.PER_WORKER
warnings.warn("`shuffle=True` is deprecated. Using `PER_WORKER`.", DeprecationWarning)
elif shuffle is False:
shuffle = None
else:
raise ValueError(f"`shuffle={shuffle}` not recognized.")
return shuffle
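# Minimal usage sketch (assumption, not part of the original module): how legacy boolean
# shuffle arguments are normalized.
def _example_check_shuffle_arg():
    assert _check_shuffle_arg(Shuffle.PER_PARTITION) is Shuffle.PER_PARTITION
    assert _check_shuffle_arg(True) is Shuffle.PER_WORKER  # deprecated alias, warns
    assert _check_shuffle_arg(False) is None  # no shuffling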
def shuffle_df(df, size=None, keep_index=False):
"""Shuffles a DataFrame, returning a new dataframe with randomly
ordered rows"""
size = size or len(df)
if isinstance(df, pd.DataFrame):
if _IGNORE_INDEX_SUPPORTED:
return df.sample(n=size, ignore_index=not keep_index)
else:
# Pandas<1.3.0
if keep_index:
return df.sample(n=size)
return df.sample(n=size).reset_index(drop=True)
else:
if _CUDF_IGNORE_INDEX_SUPPORTED:
return df.sample(n=size, ignore_index=not keep_index)
else:
return df.sample(n=size, keep_index=keep_index)
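# Minimal usage sketch (assumption, not part of the original module): shuffling a small
# pandas DataFrame, either whole or as a 2-row random sample, with a fresh index.
def _example_shuffle_df():
    df = pd.DataFrame({"a": [1, 2, 3, 4], "b": ["w", "x", "y", "z"]})
    shuffled = shuffle_df(df)  # all four rows in random order
    sampled = shuffle_df(df, size=2)  # two randomly chosen rows
    return shuffled, sampled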
| 28.948052
| 94
| 0.685509
|
f6d88397320f26d43dbd14b9aeb19ddadeb6483e
| 1,742
|
py
|
Python
|
Shriram.py
|
Shriram2004/mobilerobot-openloopcontrol
|
58b0cf5a67a3b25c356a7ea6b7435fbef909134e
|
[
"BSD-3-Clause"
] | null | null | null |
Shriram.py
|
Shriram2004/mobilerobot-openloopcontrol
|
58b0cf5a67a3b25c356a7ea6b7435fbef909134e
|
[
"BSD-3-Clause"
] | null | null | null |
Shriram.py
|
Shriram2004/mobilerobot-openloopcontrol
|
58b0cf5a67a3b25c356a7ea6b7435fbef909134e
|
[
"BSD-3-Clause"
] | null | null | null |
from robomaster import robot
import time
from robomaster import camera
if __name__ == '__main__' :
ep_robot = robot.Robot()
ep_robot.initialize(conn_type="ap")
ep_chassis = ep_robot.chassis
ep_camera = ep_robot.camera
ep_led = ep_robot.led
'''
x = x-axis movement distance,( meters) [-5,5]
y = y-axis movement distance,( meters) [-5,5]
z = rotation about z axis ( degree)[-180,180]
xy_speed = xy axis movement speed,( unit meter/second) [0.5,2]
'''
print("Camera streaming started...")
ep_camera.start_video_stream(display=True, resolution=camera.STREAM_360P)
ep_chassis.move(x=1, y=0, z=0, xy_speed=0.75).wait_for_completed()
ep_led.set_led(comp="all",r=0,g=255,b=0,effect="on")
time.sleep(2)
ep_chassis.move(x=0, y=1, z=0, xy_speed=0.75).wait_for_completed()
ep_led.set_led(comp="all",r=255,g=0,b=0,effect="on")
time.sleep(2)
ep_chassis.move(x=1.5, y=0, z=0, xy_speed=0.75).wait_for_completed()
ep_led.set_led(comp="all",r=0,g=0,b=255,effect="on")
time.sleep(2)
ep_chassis.move(x=0, y=0, z=90, xy_speed=0.75).wait_for_completed()
ep_led.set_led(comp="all",r=255,g=0,b=0,effect="on")
time.sleep(2)
ep_chassis.move(x=2, y=0, z=0, xy_speed=0.75).wait_for_completed()
ep_led.set_led(comp="all",r=0,g=255,b=0,effect="on")
time.sleep(2)
ep_chassis.move(x=0, y=0, z=90, xy_speed=0.75).wait_for_completed()
ep_led.set_led(comp="all",r=0,g=0,b=255,effect="on")
time.sleep(2)
ep_chassis.move(x=2.5, y=0, z=0, xy_speed=0.75).wait_for_completed()
ep_camera.stop_video_stream()
print("Stopped video streaming...")
ep_robot.close()
| 30.561404
| 78
| 0.635476
|
1d247176b2a0de7d9d8f73fd1bc327141c1cc156
| 1,903
|
py
|
Python
|
nova/tests/functional/api_sample_tests/test_hosts.py
|
zjzh/nova
|
7bb21723171c59b93e28f5d508c2b6df39220f13
|
[
"Apache-2.0"
] | 1,874
|
2015-01-04T05:18:34.000Z
|
2022-03-31T03:30:28.000Z
|
nova/tests/functional/api_sample_tests/test_hosts.py
|
woraser/nova
|
fc3890667e4971e3f0f35ac921c2a6c25f72adec
|
[
"Apache-2.0"
] | 132
|
2017-03-27T11:31:52.000Z
|
2022-03-30T08:45:02.000Z
|
nova/tests/functional/api_sample_tests/test_hosts.py
|
woraser/nova
|
fc3890667e4971e3f0f35ac921c2a6c25f72adec
|
[
"Apache-2.0"
] | 1,996
|
2015-01-04T15:11:51.000Z
|
2022-03-31T11:03:13.000Z
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api_sample_tests import api_sample_base
class HostsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = "os-hosts"
def test_host_startup(self):
response = self._do_get('os-hosts/%s/startup' % self.compute.host)
self._verify_response('host-get-startup', {}, response, 200)
def test_host_reboot(self):
response = self._do_get('os-hosts/%s/reboot' % self.compute.host)
self._verify_response('host-get-reboot', {}, response, 200)
def test_host_shutdown(self):
response = self._do_get('os-hosts/%s/shutdown' % self.compute.host)
self._verify_response('host-get-shutdown', {}, response, 200)
def test_host_maintenance(self):
response = self._do_put('os-hosts/%s' % self.compute.host,
'host-put-maintenance-req', {})
self._verify_response('host-put-maintenance-resp', {}, response, 200)
def test_host_get(self):
response = self._do_get('os-hosts/%s' % self.compute.host)
self._verify_response('host-get-resp', {}, response, 200)
def test_hosts_list(self):
response = self._do_get('os-hosts')
self._verify_response('hosts-list-resp', {}, response, 200)
| 40.489362
| 78
| 0.683657
|
2e7b681f5dfe300e2d21978047986ed64c3864f0
| 287
|
py
|
Python
|
acronym/acronym.py
|
oscantillomen/Exercism-Python
|
1a598769aff0e4dd58294fcd692ca0402061717e
|
[
"MIT"
] | null | null | null |
acronym/acronym.py
|
oscantillomen/Exercism-Python
|
1a598769aff0e4dd58294fcd692ca0402061717e
|
[
"MIT"
] | null | null | null |
acronym/acronym.py
|
oscantillomen/Exercism-Python
|
1a598769aff0e4dd58294fcd692ca0402061717e
|
[
"MIT"
] | null | null | null |
import re
def abbreviate(words):
cleanString = re.sub(r'[_-]', ' ', words)
word_list = re.split(r"\s", cleanString)
filtered_list = list(filter(lambda word: word, word_list))
acronym = "".join(list(map(lambda word: word[0], filtered_list)))
    return acronym.upper()
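# Minimal usage sketch (not part of the original exercise solution):
def _example_abbreviate():
    assert abbreviate("Portable Network Graphics") == "PNG"
    assert abbreviate("Complementary metal-oxide semiconductor") == "CMOS"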
| 28.7
| 69
| 0.655052
|
1423e06cf801d2a7a5814d1e0e7fa33ad5f375da
| 388
|
py
|
Python
|
src/datasets/__init__.py
|
pengkangzaia/mvts-ano-eval
|
976ffa2f151c8f91ce007e9a455bb4f97f89f2c9
|
[
"MIT"
] | 24
|
2021-09-04T08:51:55.000Z
|
2022-03-30T16:45:54.000Z
|
src/datasets/__init__.py
|
pengkangzaia/mvts-ano-eval
|
976ffa2f151c8f91ce007e9a455bb4f97f89f2c9
|
[
"MIT"
] | 3
|
2021-10-12T02:34:34.000Z
|
2022-03-18T10:37:35.000Z
|
src/datasets/__init__.py
|
pengkangzaia/mvts-ano-eval
|
976ffa2f151c8f91ce007e9a455bb4f97f89f2c9
|
[
"MIT"
] | 15
|
2021-09-18T03:41:02.000Z
|
2022-03-21T09:03:01.000Z
|
from .dataset import Dataset
from .swat import Swat
from .wadi import Wadi
from .damadics import Damadics
from .msl_entity import Msl_entity
from .smap_entity import Smap_entity
from .smd_entity import Smd_entity
from .multi_entity_dataset import MultiEntityDataset
__all__ = [
'Dataset', 'Swat', 'Wadi', 'Damadics', 'Msl_entity', 'Smap_entity', 'Smd_entity', 'MultiEntityDataset'
]
| 29.846154
| 106
| 0.786082
|
a184f881311d4826481b37a2d183be0f83c72b49
| 10,189
|
py
|
Python
|
tulip/transys/compositions.py
|
krooken/tulip-control
|
bb004422b575dccea8d19c33acfeb04b37c62a5a
|
[
"BSD-3-Clause"
] | null | null | null |
tulip/transys/compositions.py
|
krooken/tulip-control
|
bb004422b575dccea8d19c33acfeb04b37c62a5a
|
[
"BSD-3-Clause"
] | null | null | null |
tulip/transys/compositions.py
|
krooken/tulip-control
|
bb004422b575dccea8d19c33acfeb04b37c62a5a
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 by California Institute of Technology
# and University of Texas at Austin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""Compositions of transys"""
import copy
from itertools import product
from functools import reduce
from operator import or_
from tulip.transys import KripkeStructure as KS
from tulip.transys import WeightedKripkeStructure as WKS
from tulip.transys import MarkovChain as MC
from tulip.transys import MarkovDecisionProcess as MDP
def sum_values(*values):
"""Return the sum of values, considering only elements that are not None.
    Each item v, w in values can be anything that implements __add__
    such that v + 0 and v + w are defined.
"""
# Cannot simply return sum([v for v in values if v is not None])
# because it does 0 + v which will not work for v of type, e.g., VectorCost
current = 0
for v in values:
if v is not None:
current = v + current
return current
def mult_values(*values):
"""Return the product of values, considering only elements that are not None.
    Each item v, w in values can be anything that implements __mul__
    such that v * 1 and v * w are defined.
"""
current = 1
for v in values:
if v is not None:
current = v * current
return current
def neglect_none(*values):
"""Return a tuple of values, considering only elements that are not None.
If the tuple only has one element, just return that element.
"""
ret = tuple([v for v in values if v is not None])
if len(ret) == 1:
return ret[0]
return ret
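# Minimal usage sketch (assumption, not in the original module): behaviour of the three
# reduction helpers when some transition attributes are missing (None).
def _example_reduction_helpers():
    assert sum_values(1, None, 2) == 3  # None entries are skipped
    assert mult_values(0.5, None, 0.5) == 0.25
    assert neglect_none('a', None) == 'a'  # a single surviving value is unwrapped
    assert neglect_none('a', 'b') == ('a', 'b')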
def synchronous_parallel(models, transition_attr_operations=None):
"""Construct a model that represents
    the synchronous parallel composition of the given models
(i.e., tensor product in graph theory
https://en.wikipedia.org/wiki/Tensor_product_of_graphs)
It follows definition 2.42 (synchronous product) in
U{[BK08] <https://tulip-control.sourceforge.io/doc/bibliography.html#bk08>},
    with the only exception that Act does not have to be the same
for all the models in models.
    @type models: `list` of objects of types `KripkeStructure`, `WeightedKripkeStructure`,
`MarkovChain` or `MarkovDecisionProcess`
@type transition_attr_operations: `dict` whose key is the transition attribute key
and value is the operation to be performed for this transition attribute.
For an attribute whose operation is not specified,
a tuple of attribute values from all models will be used.
@return: the synchronous parallel composition of all the objects in models
@rtype: one of the following types:
* L{transys.KripkeStructure}
* L{transys.WeightedKripkeStructure}
* L{transys.MarkovChain}
* L{transys.MarkovDecisionProcess}
"""
    if transition_attr_operations is None:
        transition_attr_operations = {}
    # Let models = [K_1, ..., K_n].
# Let
# * prod_states = [S_1, ..., S_n] where S_i is the set of states of K_i
# * prod_initials = [I_1, ..., I_n] where I_i is the set of initial
# states of K_i
prod_states = []
prod_initials = []
# Construct prod_states and prod_initials and
# construct the composed model ts with all the atomic propositions.
composed_type = _get_composed_model_type(models)
if composed_type is None:
raise TypeError("Can only compose [WKS, KS] or [MDP, MC, KS]")
ts = composed_type()
for model in models:
prod_states.append(set(model.states))
prod_initials.append(model.states.initial)
ts.atomic_propositions.add_from(model.atomic_propositions)
# Compute the state of ts: S = S_1 \times ... \times S_n.
# Also, compute the label at each state (s_1, ..., s_n).
# By definition L(s_1, ..., s_n) = \bigcup_i L_i(s_i)
# where L_i is the labeling function of K_i.
for state in product(*prod_states):
ts.states.add(state)
ts.states[state]["ap"] = reduce(
or_, [models[i].states[state[i]]["ap"] for i in range(len(models))]
)
# Compute the initial state of ts: I = I_1 \times ... \times I_n
for state in product(*prod_initials):
ts.states.initial.add(state)
# Compute the set of actions
if type(ts) == MDP:
prod_actions = [list(m.actions) for m in models if type(m) == MDP]
if len(prod_actions) == 1:
ts.actions.add_from(prod_actions[0])
else:
ts.actions.add_from(list(product(*prod_actions)))
if WKS.cost_label not in transition_attr_operations:
transition_attr_operations[WKS.cost_label] = sum_values
if MC.probability_label not in transition_attr_operations:
transition_attr_operations[MC.probability_label] = mult_values
if MDP.action_label not in transition_attr_operations:
transition_attr_operations[MDP.action_label] = neglect_none
# Compute the transition of ts according to the rule
# ((s_1, ..., s_n), (a_1, ..., a_n), (s_1', ..., s_n'))
# in the transition relation of ts
# iff (s_i, a_i, s_i') is in the transition relation of K_i for all i
for from_state in ts.states:
transitions = [
models[coord].transitions.find(from_state[coord])
for coord in range(len(models))
]
for transition in product(*transitions):
to_state = tuple(t[1] for t in transition)
attr = _get_transition_attr(transition, transition_attr_operations)
ts.transitions.add(
from_state, to_state, attr,
)
return ts
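# Minimal usage sketch (assumption, not in the original module): composing two tiny
# Kripke structures. State and AP names are hypothetical; the calls mirror the API used
# inside synchronous_parallel above, so treat this as a sketch rather than a recipe.
def _example_synchronous_parallel():
    k1 = KS()
    k1.atomic_propositions.add_from({'p'})
    k1.states.add_from(['a', 'b'])
    k1.states.initial.add('a')
    k1.states['a']['ap'] = {'p'}
    k1.states['b']['ap'] = set()
    k1.transitions.add('a', 'b')
    k1.transitions.add('b', 'a')
    k2 = KS()
    k2.atomic_propositions.add_from({'q'})
    k2.states.add_from(['x'])
    k2.states.initial.add('x')
    k2.states['x']['ap'] = {'q'}
    k2.transitions.add('x', 'x')
    # Product states are pairs such as ('a', 'x'), labeled with the union {'p', 'q'}.
    return synchronous_parallel([k1, k2])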
def apply_policy(model, policy):
"""Apply the policy on the MarkovDecisionProcess and return the induced MarkovChain
@type model: `MarkovDecisionProcess`
@type policy: An object such that for any state in model.states, policy[state]
is an action in model.actions
@return: the induced MarkovChain
"""
result_model_type = _get_apply_policy_model_type(model)
result = result_model_type()
result.states.add_from(model.states)
result.states.initial.add_from(model.states.initial)
result.atomic_propositions.add_from(model.atomic_propositions)
for state in model.states:
result.states[state]["ap"] = copy.deepcopy(model.states[state]["ap"])
action = policy[state]
for transition in model.transitions.find(state):
if transition[2][MDP.action_label] != action:
continue
transition_attr = copy.deepcopy(transition[2])
del transition_attr[MDP.action_label]
result.transitions.add(transition[0], transition[1], transition_attr)
return result
def _get_transition_attr(trans_prod, transition_attr_operations):
"""Return the attribute of a transition constructed by taking the product
of transitions in trans_prod.
@type trans_prod: `list` of `Transitions` objects
@type transition_attr_operations: `dict` whose key is the transition attribute key
and value is the operation to be performed for this transition attribute.
For an attribute whose operation is not specified,
a tuple of attribute values from all models will be used.
"""
trans_attr = {}
for idx, trans in enumerate(trans_prod):
for trans_attr_key, trans_attr_value in trans[2].items():
if trans_attr_key not in trans_attr:
trans_attr[trans_attr_key] = [None for i in range(len(trans_prod))]
trans_attr[trans_attr_key][idx] = trans_attr_value
for key, val in trans_attr.items():
operation = transition_attr_operations.get(key, None)
if operation is None:
trans_attr[key] = tuple(val)
else:
trans_attr[key] = operation(*val)
return trans_attr
def _get_composed_model_type(models):
"""Return the class of model obtained from taking a composition of those given by models
@type models: `list` of objects of type KripkeStructure, WeightedKripkeStructure,
MarkovChain or MarkovDecisionProcess
"""
if all(type(m) in [MDP, MC, KS] for m in models):
if any(type(m) == MDP for m in models):
return MDP
if any(type(m) == MC for m in models):
return MC
return KS
if all(type(m) in [WKS, KS] for m in models):
if any(type(m) == WKS for m in models):
return WKS
return KS
return None
def _get_apply_policy_model_type(model):
"""Return the class of model obtained from applying a policy on the given model
@type model: KripkeStructure, WeightedKripkeStructure or MarkovDecisionProcess
"""
if type(model) == MDP:
return MC
raise TypeError("Cannot apply policy for model of type {}".format(type(model)))
| 39.492248
| 92
| 0.689862
|
6ef27ea96c861dae9bd6822848ae3a66c2e8125f
| 7,324
|
py
|
Python
|
src/oci/artifacts/models/container_repository_collection.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/artifacts/models/container_repository_collection.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/artifacts/models/container_repository_collection.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ContainerRepositoryCollection(object):
"""
List of container repository results.
"""
def __init__(self, **kwargs):
"""
Initializes a new ContainerRepositoryCollection object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param layer_count:
The value to assign to the layer_count property of this ContainerRepositoryCollection.
:type layer_count: int
:param layers_size_in_bytes:
The value to assign to the layers_size_in_bytes property of this ContainerRepositoryCollection.
:type layers_size_in_bytes: int
:param image_count:
The value to assign to the image_count property of this ContainerRepositoryCollection.
:type image_count: int
:param items:
The value to assign to the items property of this ContainerRepositoryCollection.
:type items: list[oci.artifacts.models.ContainerRepositorySummary]
:param remaining_items_count:
The value to assign to the remaining_items_count property of this ContainerRepositoryCollection.
:type remaining_items_count: int
:param repository_count:
The value to assign to the repository_count property of this ContainerRepositoryCollection.
:type repository_count: int
"""
self.swagger_types = {
'layer_count': 'int',
'layers_size_in_bytes': 'int',
'image_count': 'int',
'items': 'list[ContainerRepositorySummary]',
'remaining_items_count': 'int',
'repository_count': 'int'
}
self.attribute_map = {
'layer_count': 'layerCount',
'layers_size_in_bytes': 'layersSizeInBytes',
'image_count': 'imageCount',
'items': 'items',
'remaining_items_count': 'remainingItemsCount',
'repository_count': 'repositoryCount'
}
self._layer_count = None
self._layers_size_in_bytes = None
self._image_count = None
self._items = None
self._remaining_items_count = None
self._repository_count = None
@property
def layer_count(self):
"""
**[Required]** Gets the layer_count of this ContainerRepositoryCollection.
Total number of layers.
:return: The layer_count of this ContainerRepositoryCollection.
:rtype: int
"""
return self._layer_count
@layer_count.setter
def layer_count(self, layer_count):
"""
Sets the layer_count of this ContainerRepositoryCollection.
Total number of layers.
:param layer_count: The layer_count of this ContainerRepositoryCollection.
:type: int
"""
self._layer_count = layer_count
@property
def layers_size_in_bytes(self):
"""
**[Required]** Gets the layers_size_in_bytes of this ContainerRepositoryCollection.
Total storage in bytes consumed by layers.
:return: The layers_size_in_bytes of this ContainerRepositoryCollection.
:rtype: int
"""
return self._layers_size_in_bytes
@layers_size_in_bytes.setter
def layers_size_in_bytes(self, layers_size_in_bytes):
"""
Sets the layers_size_in_bytes of this ContainerRepositoryCollection.
Total storage in bytes consumed by layers.
:param layers_size_in_bytes: The layers_size_in_bytes of this ContainerRepositoryCollection.
:type: int
"""
self._layers_size_in_bytes = layers_size_in_bytes
@property
def image_count(self):
"""
**[Required]** Gets the image_count of this ContainerRepositoryCollection.
Total number of images.
:return: The image_count of this ContainerRepositoryCollection.
:rtype: int
"""
return self._image_count
@image_count.setter
def image_count(self, image_count):
"""
Sets the image_count of this ContainerRepositoryCollection.
Total number of images.
:param image_count: The image_count of this ContainerRepositoryCollection.
:type: int
"""
self._image_count = image_count
@property
def items(self):
"""
**[Required]** Gets the items of this ContainerRepositoryCollection.
Collection of container repositories.
:return: The items of this ContainerRepositoryCollection.
:rtype: list[oci.artifacts.models.ContainerRepositorySummary]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this ContainerRepositoryCollection.
Collection of container repositories.
:param items: The items of this ContainerRepositoryCollection.
:type: list[oci.artifacts.models.ContainerRepositorySummary]
"""
self._items = items
@property
def remaining_items_count(self):
"""
**[Required]** Gets the remaining_items_count of this ContainerRepositoryCollection.
Estimated number of remaining results.
:return: The remaining_items_count of this ContainerRepositoryCollection.
:rtype: int
"""
return self._remaining_items_count
@remaining_items_count.setter
def remaining_items_count(self, remaining_items_count):
"""
Sets the remaining_items_count of this ContainerRepositoryCollection.
Estimated number of remaining results.
:param remaining_items_count: The remaining_items_count of this ContainerRepositoryCollection.
:type: int
"""
self._remaining_items_count = remaining_items_count
@property
def repository_count(self):
"""
**[Required]** Gets the repository_count of this ContainerRepositoryCollection.
Total number of repositories.
:return: The repository_count of this ContainerRepositoryCollection.
:rtype: int
"""
return self._repository_count
@repository_count.setter
def repository_count(self, repository_count):
"""
Sets the repository_count of this ContainerRepositoryCollection.
Total number of repositories.
:param repository_count: The repository_count of this ContainerRepositoryCollection.
:type: int
"""
self._repository_count = repository_count
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 32.40708
| 245
| 0.670126
|
8d3d091fb392074516d18416c8441c455bc83f43
| 1,624
|
py
|
Python
|
alipay/aop/api/response/AntMerchantExpandDeliveryGoodsinfoSyncResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/response/AntMerchantExpandDeliveryGoodsinfoSyncResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/response/AntMerchantExpandDeliveryGoodsinfoSyncResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AntMerchantExpandDeliveryGoodsinfoSyncResponse(AlipayResponse):
def __init__(self):
super(AntMerchantExpandDeliveryGoodsinfoSyncResponse, self).__init__()
self._assign_item_id = None
self._error_code = None
self._error_desc = None
self._success = None
@property
def assign_item_id(self):
return self._assign_item_id
@assign_item_id.setter
def assign_item_id(self, value):
self._assign_item_id = value
@property
def error_code(self):
return self._error_code
@error_code.setter
def error_code(self, value):
self._error_code = value
@property
def error_desc(self):
return self._error_desc
@error_desc.setter
def error_desc(self, value):
self._error_desc = value
@property
def success(self):
return self._success
@success.setter
def success(self, value):
self._success = value
def parse_response_content(self, response_content):
response = super(AntMerchantExpandDeliveryGoodsinfoSyncResponse, self).parse_response_content(response_content)
if 'assign_item_id' in response:
self.assign_item_id = response['assign_item_id']
if 'error_code' in response:
self.error_code = response['error_code']
if 'error_desc' in response:
self.error_desc = response['error_desc']
if 'success' in response:
self.success = response['success']
| 29
| 119
| 0.676724
|
7c02ec58680adb1c83f933074a5eb599cb3e27c7
| 10,737
|
py
|
Python
|
zorg/buildbot/util/artifacts.py
|
antiagainst/llvm-zorg
|
a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918
|
[
"Apache-2.0"
] | 27
|
2019-01-15T03:03:58.000Z
|
2022-03-22T23:31:36.000Z
|
zorg/buildbot/util/artifacts.py
|
antiagainst/llvm-zorg
|
a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918
|
[
"Apache-2.0"
] | 21
|
2020-05-29T01:12:26.000Z
|
2022-03-29T20:06:22.000Z
|
zorg/buildbot/util/artifacts.py
|
antiagainst/llvm-zorg
|
a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918
|
[
"Apache-2.0"
] | 38
|
2019-02-10T02:46:33.000Z
|
2022-03-26T10:27:29.000Z
|
import os
import buildbot
import config
from buildbot.steps.shell import WithProperties
from zorg.buildbot.util.phasedbuilderutils import setProperty, determine_phase_id
from zorg.buildbot.util.phasedbuilderutils import set_config_option
# Get some parameters about where to upload and download results from.
is_production = set_config_option('Master Options', 'is_production')
if is_production:
rsync_user = set_config_option('Master Options', 'rsync_user',
'buildmaster')
master_name = set_config_option('Master Options', 'master_name',
'localhost')
master_protocol = set_config_option('Master Options',
'master_protocol', 'http')
base_download_url = '%s://%s/artifacts' % (master_protocol, master_name)
base_package_url = '%s://%s/packages' % (master_protocol, master_name)
package_url = set_config_option('Master Options', 'package_url',
base_package_url)
artifacts_path = set_config_option('Master Options', 'artifacts_path',
os.path.expanduser('~/artifacts'))
curl_flags = set_config_option('Master Options', 'curl_flags',
'-fvLo')
else:
import getpass
rsync_user = getpass.getuser()
master_name = 'localhost'
master_protocol = 'http'
base_download_url = 'http://%s/~%s/artifacts' % (master_name, rsync_user)
package_url = 'http://%s/~%s/packages' % (master_name, rsync_user)
artifacts_path = os.path.expanduser('~/artifacts')
curl_flags = '-fvLo'
base_rsync_path = '%s@%s:%s' % (rsync_user, master_name, artifacts_path)
# This method is used in determining the name of a given compiler archive
def _determine_compiler_kind(props):
# we need to differentiate between configure/make style builds (clang)
    # and buildit-style builds (apple-clang)
buildName = props['buildername']
kind = buildName
subname = buildName
if '_' in buildName:
kind,subname = buildName.split('_', 1)
if 'clang' in kind:
subname = kind
for kind in ('apple-clang','clang'):
if kind in subname:
return kind
raise ValueError, "unknown compiler"
# compiler_path and archive_name should be completely deterministic. Any
# methods acting on an archive should use the following two methods to
# calculate the path and/or name for an archive
def _determine_archive_name(props):
# phase_id must be set upstream. Usually by a phase builder
archive_name = _determine_compiler_kind(props)
if props.has_key('phase_id') and props['phase_id']:
archive_name += '-' + props['phase_id'] + '.tar.gz'
else:
raise ValueError, "phase_id doesn't exist"
return archive_name
def _determine_compiler_path(props):
# We need to segregate compiler builds based on both branch and builder
# TODO: better solution when branch is None
compiler_path = props['buildername']
if props.has_key('default_branch') and props['default_branch']:
compiler_path = props['default_branch']
elif props.has_key('branch') and props['branch']:
compiler_path = props['branch']
elif props.has_key('use_builder') and props['use_builder']:
compiler_path = props['use_builder']
return compiler_path
def _determine_bootstrap_url(props):
if props.has_key('scheduler'):
name= ''
if props['scheduler'].startswith('phase2'):
# always use phase1 compiler for phase2
# TODO: this shouldn't be hard coded
name = 'clang-x86_64-darwin11-nobootstrap-RAincremental'
else:
# always use phase2 compiler for phase3 & phase4 compiler builds
# TODO: this shouldn't be hard coded
name = 'clang-x86_64-darwin11-RA'
curl = base_download_url + '/' + name + '/clang-' + props['phase_id']
curl += '.tar.gz'
return curl
else:
# if we get this far, we can assume that someone clicked 'rebuild'
# (otherwise it would have a scheduler, or not have a phase_id)
# we'll fall back to the phase1 build for this compiler
curl = base_download_url + '/clang-x86_64-darwin11-nobootstrap-RA/'
curl += props['buildername'] + '.tar.gz'
return curl
def GetCompilerRoot(f):
# The following steps are used to retrieve a compiler archive
# clean out any existing archives
f.addStep(buildbot.steps.shell.ShellCommand(
name='rm.host-compiler',
command=['rm', '-rfv', 'host-compiler', 'host-compiler.tar.gz'],
haltOnFailure=False, description=['rm', 'host-compiler'],
workdir=WithProperties('%(builddir)s')))
setProperty(f, 'rootURL',
WithProperties( base_download_url + '/%(getpath)s/%(getname)s',
getpath=_determine_compiler_path,
getname=_determine_archive_name))
# curl down the archive
f.addStep(buildbot.steps.shell.ShellCommand(
name='download.artifacts',
command=['curl', curl_flags, 'host-compiler.tar.gz',
WithProperties('%(rootURL)s')],
haltOnFailure=True,
description=['download build artifacts'],
workdir=WithProperties('%(builddir)s')))
# extract the compiler root from the archive
f.addStep(buildbot.steps.shell.ShellCommand(
name='unzip', command=['tar', '-zxvf','../host-compiler.tar.gz'],
haltOnFailure=True, description=['extract', 'host-compiler'],
workdir='host-compiler'))
return f
def uploadArtifacts(f, rootdir='clang-install'):
    # phase_id is required to make sure that paths to archives are deterministic.
setProperty(f, 'phase_id', WithProperties('%(get_phase_id)s',
get_phase_id = determine_phase_id))
# we always create/name a compiler archive based on the same criteria
archive_path = WithProperties('%(builddir)s/%(getname)s',
getname=_determine_archive_name)
if rootdir.endswith('install'):
cit_path = 'clang-build/**/bin/c-index-test'
copy_command = 'cp %s %s/bin/' % (cit_path, rootdir)
f.addStep(buildbot.steps.shell.ShellCommand(
name='add.cit', haltOnFailure=True,
command = ['sh', '-c', copy_command],
description=['add c-index-test to root'],
workdir=WithProperties('%(builddir)s')))
f.addStep(buildbot.steps.shell.ShellCommand(
name='tar.and.zip', haltOnFailure=True,
command=['tar', '-czv', '--exclude', '.svn', '-f', archive_path, './'],
description=['tar', '&', 'zip'], workdir=rootdir))
# Upload the archive.
archive_dest = WithProperties(base_rsync_path +'/%(getpath)s/',
getpath=_determine_compiler_path)
f.addStep(buildbot.steps.shell.ShellCommand(
name='upload.artifacts', haltOnFailure=True,
command=['rsync', '-pave', 'ssh', archive_path, archive_dest],
description=['upload build artifacts'],
workdir=WithProperties('%(builddir)s')))
# Set the artifact URL in a property for easy access from the build log.
download_str = base_download_url + '/%(getpath)s/%(getname)s'
artifactsURL = WithProperties(download_str, getpath=_determine_compiler_path,
getname=_determine_archive_name)
setProperty(f, 'artifactsURL', artifactsURL)
return f
def project_from_name(builder_name):
for project in ('apple-clang', 'clang'):
if project in builder_name:
return project
raise RuntimeError('Invalid builder name.')
def determine_url(props):
if props.has_key('phase_id') and props.has_key('category'):
if props['category'].startswith('build-'):
return _determine_bootstrap_url(props)
project = project_from_name(props['buildername'])
name = props['use_builder']
curl = base_download_url + '/' + name + '/' + project_from_name(name)
curl += '-' + props['phase_id'] + '.tar.gz'
return curl
# phase_id does not exist, so this has to be a manually triggered build.
# we will fall back to the latest_validated build for the use_builder
# property if it exists, otherwise, fall back to the latest_validated build
# for this builder.
curl = base_download_url + '/validated_builds/'
if props.has_key('use_builder'):
curl += props['use_builder'] + '.tar.gz'
else:
curl += props['buildername'] + '.tar.gz'
return curl
def GetCompilerArtifacts(f):
f.addStep(buildbot.steps.shell.ShellCommand(
name='rm.host-compiler',
command=['rm', '-rfv', 'host-compiler', 'host-compiler.tar.gz'],
haltOnFailure=False, description=['rm', 'host-compiler'],
workdir=WithProperties('%(builddir)s')))
f.addStep(buildbot.steps.shell.ShellCommand(
name='download.artifacts',
command=['curl', curl_flags, 'host-compiler.tar.gz',
WithProperties('%(get_curl)s', get_curl=determine_url)],
haltOnFailure=True, description=['download build artifacts'],
workdir=WithProperties('%(builddir)s')))
f.addStep(buildbot.steps.shell.ShellCommand(
name='unzip', command=['tar', '-zxvf','../host-compiler.tar.gz'],
haltOnFailure=True, description=['extract', 'host-compiler'],
workdir='host-compiler'))
return f
def GetCCFromCompilerArtifacts(f, base_dir):
def get_cc(status, stdin, stdout):
lines = filter(bool, stdin.split('\n'))
for line in lines:
if 'bin/clang' in line:
cc_path = line
return { 'cc_path' : cc_path }
return { }
f.addStep(buildbot.steps.shell.SetProperty(
name='find.cc',
command=['find', base_dir, '-name', 'clang'],
extract_fn=get_cc,
workdir=WithProperties('%(builddir)s')))
return f
def GetCXXFromCompilerArtifacts(f, base_dir):
def get_cxx(status, stdin, stdout):
lines = filter(bool, stdin.split('\n'))
for line in lines:
if 'bin/clang++' in line:
cxx_path = line
return { 'cxx_path' : cxx_path }
return { }
f.addStep(buildbot.steps.shell.SetProperty(
name='find.cxx',
command=['find', base_dir, '-name', 'clang++'],
extract_fn=get_cxx,
workdir=WithProperties('%(builddir)s')))
return f
| 44.924686
| 85
| 0.627084
|
0b1062297ea37b4cfb0804fe69a246100aa038c9
| 226
|
py
|
Python
|
WeBlog/app/main/__init__.py
|
imrocky1976/web
|
3af7fbd82af8b4d2737672779d5742825b94a6fe
|
[
"MIT"
] | null | null | null |
WeBlog/app/main/__init__.py
|
imrocky1976/web
|
3af7fbd82af8b4d2737672779d5742825b94a6fe
|
[
"MIT"
] | 4
|
2020-03-24T17:13:37.000Z
|
2022-03-08T21:09:48.000Z
|
WeBlog/app/main/__init__.py
|
imrocky1976/web
|
3af7fbd82af8b4d2737672779d5742825b94a6fe
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
main = Blueprint('main', __name__)
from app.main import errors, views
from app.models import Permission
@main.app_context_processor
def inject_permissions():
return dict(Permission=Permission)
| 25.111111
| 38
| 0.80531
|
bed260805baa1ef468d9e95986e74259460d4d45
| 120
|
py
|
Python
|
dryadic/learning/stan/logistic/__init__.py
|
ohsu-comp-bio/dryads
|
015f6d3186a5146809334e2490c072e675b22891
|
[
"MIT"
] | null | null | null |
dryadic/learning/stan/logistic/__init__.py
|
ohsu-comp-bio/dryads
|
015f6d3186a5146809334e2490c072e675b22891
|
[
"MIT"
] | null | null | null |
dryadic/learning/stan/logistic/__init__.py
|
ohsu-comp-bio/dryads
|
015f6d3186a5146809334e2490c072e675b22891
|
[
"MIT"
] | null | null | null |
from .classifiers import BaseLogistic
from .stan_models import gauss_model
__all__ = ['BaseLogistic', 'gauss_model']
| 17.142857
| 41
| 0.791667
|
75db4daa25a7d76e3b2d9e3e7df002cf9c0d800e
| 2,341
|
py
|
Python
|
docs/sphinx/source/conf.py
|
jmairal/arsenic
|
7828cf15f93eac40962c222594ff1fe537fff49c
|
[
"BSD-3-Clause"
] | null | null | null |
docs/sphinx/source/conf.py
|
jmairal/arsenic
|
7828cf15f93eac40962c222594ff1fe537fff49c
|
[
"BSD-3-Clause"
] | null | null | null |
docs/sphinx/source/conf.py
|
jmairal/arsenic
|
7828cf15f93eac40962c222594ff1fe537fff49c
|
[
"BSD-3-Clause"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# -- Project information -----------------------------------------------------
project = 'Cyanure'
copyright = '2019, Julien Mairal'
author = 'Julien Mairal'
# The full version, including alpha/beta/rc tags
release = '1.0.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.inheritance_diagram',
'numpydoc',
'autodocsumm'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
autosummary_generate = True
autodoc_default_options = {"autosummary": True}
numpydoc_show_class_members = False
source_encoding = 'utf-8'
#source_encoding = 'iso-8859-15'
language = 'en'
html_output_encoding = 'utf-8'
#html_output_encoding = 'ISO-8859-1'
| 32.068493
| 79
| 0.6651
|
caa74b617f4a51cafa48cd436ddebbb8f145a9a6
| 2,314
|
py
|
Python
|
tests/unittests/hub/test_pipeline_manager.py
|
krishnakatyal/towhee
|
c5e043aa1509cf46644ca6b53f691d6ed2647212
|
[
"Apache-2.0"
] | null | null | null |
tests/unittests/hub/test_pipeline_manager.py
|
krishnakatyal/towhee
|
c5e043aa1509cf46644ca6b53f691d6ed2647212
|
[
"Apache-2.0"
] | 1
|
2022-01-19T06:21:07.000Z
|
2022-01-19T06:21:07.000Z
|
tests/unittests/hub/test_pipeline_manager.py
|
jennyli-z/towhee
|
55c55fd961229575b75eae269b55090c839f8dcd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pathlib import Path
from shutil import rmtree
from requests.exceptions import HTTPError
from towhee.utils.git_utils import GitUtils
from towhee.hub.pipeline_manager import PipelineManager
public_path = Path(__file__).parent.parent.resolve()
class TestPipelineManager(unittest.TestCase):
"""
Unit test for PipelineManager.
"""
def test_create(self):
repo = 'pipeline-operator'
manager = PipelineManager('towhee', repo)
with self.assertRaises(HTTPError):
manager.create('psw')
def test_exists_create(self):
repo = 'ci-test'
manager = PipelineManager('towhee', repo)
manager.create('psw')
def test_init_pipeline(self):
temp_repo = 'pipeline-template'
git = GitUtils(author='towhee', repo=temp_repo)
git.clone(local_repo_path=public_path / 'test_cache' / temp_repo)
pipeline_repo = 'init-pipeline'
pipeline_manager = PipelineManager('towhee', pipeline_repo)
if not (public_path / 'test_cache' / pipeline_repo).exists():
(public_path / 'test_cache' / pipeline_repo).mkdir()
pipeline_manager.init_pipeline(file_temp=public_path / 'test_cache' / temp_repo, file_dest=public_path / 'test_cache' / pipeline_repo)
self.assertTrue(pipeline_manager.check(public_path / 'test_cache' / pipeline_repo))
rmtree(public_path / 'test_cache' / temp_repo)
rmtree(public_path / 'test_cache' / pipeline_repo)
def test_check(self):
repo = 'ci-test'
manager = PipelineManager('towhee', repo)
self.assertTrue(manager.check(public_path / 'mock_pipelines/ci_test'))
if __name__ == '__main__':
unittest.main()
| 36.15625
| 142
| 0.709162
|
e48457d47dc71461ba3b0e36528b75154c99c67e
| 391
|
py
|
Python
|
mundo1/ex020.py
|
Igor3550/Exercicios-de-python
|
e0f6e043df4f0770ac15968485fbb19698b4ac6b
|
[
"MIT"
] | null | null | null |
mundo1/ex020.py
|
Igor3550/Exercicios-de-python
|
e0f6e043df4f0770ac15968485fbb19698b4ac6b
|
[
"MIT"
] | null | null | null |
mundo1/ex020.py
|
Igor3550/Exercicios-de-python
|
e0f6e043df4f0770ac15968485fbb19698b4ac6b
|
[
"MIT"
] | null | null | null |
# write a program that reads the names of four students and draws the presentation order at random
import random
a1 = input('Digite o nome do primeiro aluno: ')
a2 = input('Digite o nome do segundo aluno: ')
a3 = input('Digite o nome do terceiro aluno: ')
a4 = input('Digite o nome do quarto aluno: ')
x = [a1, a2, a3, a4]
random.shuffle(x)
print('A ordem de apresentação é {}'.format(x))
| 39.1
| 100
| 0.710997
|
8950b0e7c929d22387c214c33748c48839c85eb8
| 6,422
|
py
|
Python
|
ros/src/tl_detector/tl_detector.py
|
MadridTeam/ROSCentral
|
9bcedf04c64f3b3d992077ae02ac501b92063336
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/tl_detector.py
|
MadridTeam/ROSCentral
|
9bcedf04c64f3b3d992077ae02ac501b92063336
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/tl_detector.py
|
MadridTeam/ROSCentral
|
9bcedf04c64f3b3d992077ae02ac501b92063336
|
[
"MIT"
] | 1
|
2018-10-23T15:20:53.000Z
|
2018-10-23T15:20:53.000Z
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.camera_image = None
self.lights = []
self.waypoints_2d = None
self.waypoint_tree = None
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier(False)
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y]
for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
#rospy.logwarn('IMAGE Callback')
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, x, y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
pose (Pose): position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
#TODO implement
closest_wp_idx = self.waypoint_tree.query([x, y], 1)[1]
return closest_wp_idx
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
if(not self.has_image):
self.prev_light_loc = None
return False
cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
#Get classification
return self.light_classifier.get_classification(cv_image)
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_light = None
line_wp_idx = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
        if self.pose is not None and self.waypoint_tree is not None:
            car_position = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
#TODO find the closest visible traffic light (if one exists)
diff = len(self.waypoints.waypoints)
for i, light in enumerate(self.lights):
# Get stop line waypoint index
line = stop_line_positions[i]
temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
# Find closest stop line waypoint index
d = temp_wp_idx - car_position
if d >= 0 and d < diff:
diff = d
closest_light = light
line_wp_idx = temp_wp_idx
if closest_light:
state = self.get_light_state(closest_light)
rospy.logerr("Manish: {}, {}".format(line_wp_idx, state))
return line_wp_idx, state
#self.waypoints = None
return -1, TrafficLight.UNKNOWN
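# Illustrative sketch (assumption, not part of the original node): the KDTree lookup in
# get_closest_waypoint returns a (distance, index) pair and keeps only the index.
def _example_closest_waypoint():
    waypoints_2d = [[0.0, 0.0], [10.0, 0.0], [20.0, 0.0]]
    tree = KDTree(waypoints_2d)
    dist, idx = tree.query([9.0, 1.0], 1)
    return idx  # 1, the waypoint at (10, 0)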
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
| 35.877095
| 107
| 0.642479
|
da8a93bc1d2923d330fc2cff04fc2eb99ac6f311
| 4,260
|
py
|
Python
|
Interfaces/Spacecraft/LoadMMS.py
|
tulasinandan/TurbAn
|
14e912157107ad46c4c65bcac745da738d93c1c6
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
Interfaces/Spacecraft/LoadMMS.py
|
tulasinandan/TurbAn
|
14e912157107ad46c4c65bcac745da738d93c1c6
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
Interfaces/Spacecraft/LoadMMS.py
|
tulasinandan/TurbAn
|
14e912157107ad46c4c65bcac745da738d93c1c6
|
[
"BSD-2-Clause-Patent"
] | 10
|
2019-03-22T15:30:12.000Z
|
2021-02-10T02:55:50.000Z
|
import sys
import os
sys.path.insert(0,os.environ['HOME']+'/AJGAR/TurbAn')
import pandas as pd
import numpy as np
import TurbAn.Analysis.Simulations as af
import TurbAn.Analysis.TimeSeries.Time_Series_Analysis as tsa
def create_df_ascii(dirname,chop_ends=100,hampel_filter=None,lowpass_freq=None,spacecraft=1):
try:
print('Creating Data Frame from '+dirname)
except:
print("No directory given, exiting")
return
spacecraft = str(spacecraft)
cadence=os.path.basename(dirname)
if cadence == 'B':
suffix=spacecraft+'_resB1.dat'
elif cadence == 'electrons':
suffix=spacecraft+'_resNe_1.dat'
elif cadence == 'ions':
suffix=spacecraft+'_resNi_1.dat'
elif cadence == 'B_hmp':
suffix=spacecraft+'_resB1_hmp.dat'
elif cadence == 'B_hmp_lp':
suffix=spacecraft+'_resB1_hmp_lp.dat'
elif cadence == 'electrons_hmp':
suffix=spacecraft+'_resNe_1_hmp.dat'
elif cadence == 'electrons_hmp_lp':
suffix=spacecraft+'_resNe_1_hmp_lp.dat'
elif cadence == 'ions_hmp':
suffix=spacecraft+'_resNi_1_hmp.dat'
elif cadence == 'ions_hmp_lp':
suffix=spacecraft+'_resNi_1_hmp_lp.dat'
b =np.loadtxt(dirname+'/B' +suffix)
dt=b[2,0]-b[1,0]
nt=len(b[:,0]); print('Number of entries nt=',nt)
nt=nt if nt%2==0 else nt-1
#
if chop_ends is not None:
b=b[chop_ends:nt,:][:-chop_ends,:]
vi=np.loadtxt(dirname+'/Vi_'+suffix)[chop_ends:nt,:][:-chop_ends,:]
ve=np.loadtxt(dirname+'/Ve_'+suffix)[chop_ends:nt,:][:-chop_ends,:]
ni=np.loadtxt(dirname+'/Ni_'+suffix)[chop_ends:nt,:][:-chop_ends,:]
ne=np.loadtxt(dirname+'/Ne_'+suffix)[chop_ends:nt,:][:-chop_ends,:]
R =np.loadtxt(dirname+'/R' +suffix)[chop_ends:nt,:][:-chop_ends,:]
else:
b=b[:nt,:]
vi=np.loadtxt(dirname+'/Vi_'+suffix)[:nt,:]
ve=np.loadtxt(dirname+'/Ve_'+suffix)[:nt,:]
ni=np.loadtxt(dirname+'/Ni_'+suffix)[:nt,:]
ne=np.loadtxt(dirname+'/Ne_'+suffix)[:nt,:]
R =np.loadtxt(dirname+'/R' +suffix)[:nt,:]
#
if hampel_filter is not None:
from .Hampel import spectral_hampel_scalar, spectral_hampel_vector
print('Running Spectral Hampel filter on ...')
print('... p+ velocity')
vi[:,1],vi[:,2],vi[:,3]=spectral_hampel_vector(vi[:,1],vi[:,2],vi[:,3],dt,\
hampel_filter['p+'][0],hampel_filter['p+'][1])
print('... e- velocity')
ve[:,1],ve[:,2],ve[:,3]=spectral_hampel_vector(ve[:,1],ve[:,2],ve[:,3],dt,\
hampel_filter['e-'][0],hampel_filter['e-'][1])
print('... p+ density')
ni[:,1] = spectral_hampel_scalar(ni[:,1],dt,hampel_filter['p+'][0],hampel_filter['p+'][1])
print('... e- density')
ne[:,1] = spectral_hampel_scalar(ne[:,1],dt,hampel_filter['e-'][0],hampel_filter['e-'][1])
if 'chop_edges' in hampel_filter:
chop_edges=hampel_filter['chop_edges']
b = b[chop_edges:,:][:-chop_edges,:]
vi=vi[chop_edges:,:][:-chop_edges,:]
ve=ve[chop_edges:,:][:-chop_edges,:]
ni=ni[chop_edges:,:][:-chop_edges,:]
ne=ne[chop_edges:,:][:-chop_edges,:]
R = R[chop_edges:,:][:-chop_edges,:]
if lowpass_freq is not None and type(lowpass_freq) is not dict:
temp=lowpass_freq
lowpass_freq={}
for i in 'vi','ve','ni','ne':
lowpass_freq[i]=temp
temp=None
if lowpass_freq is not None:
print('Applying low-pass filter on ...')
print('vi at ',str(lowpass_freq['vi']),' ... and ...')
print('ve at ',str(lowpass_freq['ve']),' ... ')
for i in range(1,4):
vi[:,i]=tsa.lowpass_sharp(vi[:,i],dt,lowpass_freq['vi'])
ve[:,i]=tsa.lowpass_sharp(ve[:,i],dt,lowpass_freq['ve'])
print('ni at ',str(lowpass_freq['ni']),' ... ')
ni[:,1]=tsa.lowpass_sharp(ni[:,1],dt,lowpass_freq['ni'])
print('ne at ',str(lowpass_freq['ne']),' ... ')
ne[:,1]=tsa.lowpass_sharp(ne[:,1],dt,lowpass_freq['ne'])
df=pd.DataFrame({'t':R[:,0],'x':R[:,1],'y':R[:,2],'z':R[:,3],'bx':b[:,1],\
'by':b[:,2],'bz':b[:,3],'vix':vi[:,1],'viy':vi[:,2],\
'viz':vi[:,3],'ni':ni[:,1],'vex':ve[:,1],'vey':ve[:,2],\
'vez':ve[:,3],'ne':ne[:,1]})
return df
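# Illustrative usage sketch: the directory path, spacecraft number and filter
# settings below are placeholders, not values taken from this project.
if __name__ == '__main__':
   example_dir = os.path.join(os.environ['HOME'], 'MMS_DATA', 'B')  # a cadence directory
   mms_df = create_df_ascii(example_dir, chop_ends=100, spacecraft=1,
                            hampel_filter=None, lowpass_freq=None)
   if mms_df is not None:
      print(mms_df[['t', 'bx', 'by', 'bz', 'ni', 'ne']].describe())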
| 39.813084 | 96 | 0.586385 |
bbc0769297cf7408c5110e84c296caf3b3bfc977 | 1,945 | py | Python | google-cloud-sdk/lib/surface/container/builds/describe.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | ["Apache-2.0"] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/.install/.backup/lib/surface/container/builds/describe.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | ["Apache-2.0"] | null | null | null | google-cloud-sdk/.install/.backup/lib/surface/container/builds/describe.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | ["Apache-2.0"] | 1 | 2020-07-25T12:09:01.000Z | 2020-07-25T12:09:01.000Z |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Describe build command."""
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.calliope import base
class Describe(base.DescribeCommand):
"""Get information about a particular build."""
@staticmethod
def Args(parser):
"""Register flags for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser.
"""
parser.add_argument(
'build',
help=('The build to describe. The ID of the build is printed at the '
'end of the build submission process, or in the ID column when '
'listing builds.'),
)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
client = cloudbuild_util.GetClientInstance()
resources = self.context['registry']
build_ref = resources.Parse(
args.build, collection='cloudbuild.projects.builds')
return client.projects_builds.Get(
client.MESSAGES_MODULE.CloudbuildProjectsBuildsGetRequest(
projectId=build_ref.projectId, id=build_ref.id))
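# Usage note (illustrative, BUILD_ID is a placeholder): based on this module's
# surface path, the command is normally reached through the gcloud CLI, roughly as
#   gcloud container builds describe BUILD_ID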
| 34.122807 | 79 | 0.71054 |
5b798cbd462f59029f5d2ca4e4bf424e27bff8eb | 9,025 | py | Python | test/python/circuit/test_circuit_load_from_qpy.py | irajput/qiskit-terra | e9f06e457bfb67afe1b36b6b9b0b315eb1a24800 | ["Apache-2.0"] | 1 | 2021-07-06T09:07:47.000Z | 2021-07-06T09:07:47.000Z | test/python/circuit/test_circuit_load_from_qpy.py | evercodes/qiskit-terra | 649fec2cd1644c43eabc39b0a588c0a9347a2b50 | ["Apache-2.0"] | null | null | null | test/python/circuit/test_circuit_load_from_qpy.py | evercodes/qiskit-terra | 649fec2cd1644c43eabc39b0a588c0a9347a2b50 | ["Apache-2.0"] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test cases for the circuit qasm_file and qasm_string method."""
import io
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.circuit.random import random_circuit
from qiskit.circuit.gate import Gate
from qiskit.circuit.instruction import Instruction
from qiskit.circuit.parameter import Parameter
from qiskit.opflow import X, Y, Z
from qiskit.test import QiskitTestCase
from qiskit.circuit.qpy_serialization import dump, load
class TestLoadFromQPY(QiskitTestCase):
"""Test circuit.from_qasm_* set of methods."""
def test_qpy_full_path(self):
"""Test full path qpy serialization for basic circuit."""
qr_a = QuantumRegister(4, "a")
qr_b = QuantumRegister(4, "b")
cr_c = ClassicalRegister(4, "c")
cr_d = ClassicalRegister(4, "d")
q_circuit = QuantumCircuit(
qr_a,
qr_b,
cr_c,
cr_d,
name="MyCircuit",
metadata={"test": 1, "a": 2},
global_phase=3.14159,
)
q_circuit.h(qr_a)
q_circuit.cx(qr_a, qr_b)
q_circuit.barrier(qr_a)
q_circuit.barrier(qr_b)
q_circuit.measure(qr_a, cr_c)
q_circuit.measure(qr_b, cr_d)
qpy_file = io.BytesIO()
dump(q_circuit, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(q_circuit, new_circ)
self.assertEqual(q_circuit.global_phase, new_circ.global_phase)
self.assertEqual(q_circuit.metadata, new_circ.metadata)
self.assertEqual(q_circuit.name, new_circ.name)
def test_circuit_with_conditional(self):
"""Test that instructions with conditions are correctly serialized."""
qc = QuantumCircuit(1, 1)
qc.x(0).c_if(qc.cregs[0], 1)
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
def test_int_parameter(self):
"""Test that integer parameters are correctly serialized."""
qc = QuantumCircuit(1)
qc.rx(3, 0)
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
def test_float_parameter(self):
"""Test that float parameters are correctly serialized."""
qc = QuantumCircuit(1)
qc.rx(3.14, 0)
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
def test_numpy_float_parameter(self):
"""Test that numpy float parameters are correctly serialized."""
qc = QuantumCircuit(1)
qc.rx(np.float32(3.14), 0)
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
def test_numpy_int_parameter(self):
"""Test that numpy integer parameters are correctly serialized."""
qc = QuantumCircuit(1)
qc.rx(np.int16(3), 0)
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
def test_unitary_gate(self):
"""Test that numpy array parameters are correctly serialized"""
qc = QuantumCircuit(1)
unitary = np.array([[0, 1], [1, 0]])
qc.unitary(unitary, 0)
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
def test_opaque_gate(self):
"""Test that custom opaque gate is correctly serialized"""
custom_gate = Gate("black_box", 1, [])
qc = QuantumCircuit(1)
qc.append(custom_gate, [0])
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
def test_opaque_instruction(self):
"""Test that custom opaque instruction is correctly serialized"""
custom_gate = Instruction("black_box", 1, 0, [])
qc = QuantumCircuit(1)
qc.append(custom_gate, [0])
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
def test_custom_gate(self):
"""Test that custom gate is correctly serialized"""
custom_gate = Gate("black_box", 1, [])
custom_definition = QuantumCircuit(1)
custom_definition.h(0)
custom_definition.rz(1.5, 0)
custom_definition.sdg(0)
custom_gate.definition = custom_definition
qc = QuantumCircuit(1)
qc.append(custom_gate, [0])
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
self.assertEqual(qc.decompose(), new_circ.decompose())
def test_custom_instruction(self):
"""Test that custom instruction is correctly serialized"""
custom_gate = Instruction("black_box", 1, 0, [])
custom_definition = QuantumCircuit(1)
custom_definition.h(0)
custom_definition.rz(1.5, 0)
custom_definition.sdg(0)
custom_gate.definition = custom_definition
qc = QuantumCircuit(1)
qc.append(custom_gate, [0])
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
self.assertEqual(qc.decompose(), new_circ.decompose())
def test_parameter(self):
"""Test that a circuit with a parameter is correctly serialized."""
theta = Parameter("theta")
qc = QuantumCircuit(5, 1)
qc.h(0)
for i in range(4):
qc.cx(i, i + 1)
qc.barrier()
qc.rz(theta, range(5))
qc.barrier()
for i in reversed(range(4)):
qc.cx(i, i + 1)
qc.h(0)
qc.measure(0, 0)
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
self.assertEqual(qc.bind_parameters({theta: 3.14}), new_circ.bind_parameters({theta: 3.14}))
def test_bound_parameter(self):
"""Test a circuit with a bound parameter is correctly serialized."""
theta = Parameter("theta")
qc = QuantumCircuit(5, 1)
qc.h(0)
for i in range(4):
qc.cx(i, i + 1)
qc.barrier()
qc.rz(theta, range(5))
qc.barrier()
for i in reversed(range(4)):
qc.cx(i, i + 1)
qc.h(0)
qc.measure(0, 0)
        qc.assign_parameters({theta: 3.14}, inplace=True)
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circ = load(qpy_file)[0]
self.assertEqual(qc, new_circ)
def test_parameter_expression(self):
"""Test a circuit with a parameter expression."""
theta = Parameter("theta")
phi = Parameter("phi")
sum_param = theta + phi
qc = QuantumCircuit(5, 1)
qc.h(0)
for i in range(4):
qc.cx(i, i + 1)
qc.barrier()
qc.rz(sum_param, range(3))
qc.rz(phi, 3)
qc.rz(theta, 4)
qc.barrier()
for i in reversed(range(4)):
qc.cx(i, i + 1)
qc.h(0)
qc.measure(0, 0)
qpy_file = io.BytesIO()
dump(qc, qpy_file)
qpy_file.seek(0)
new_circuit = load(qpy_file)[0]
self.assertEqual(qc, new_circuit)
def test_string_parameter(self):
"""Test a PauliGate instruction that has string parameters."""
circ = (X ^ Y ^ Z).to_circuit_op().to_circuit()
qpy_file = io.BytesIO()
dump(circ, qpy_file)
qpy_file.seek(0)
new_circuit = load(qpy_file)[0]
self.assertEqual(circ, new_circuit)
def test_multiple_circuits(self):
"""Test multiple circuits can be serialized together."""
circuits = []
for i in range(10):
circuits.append(
random_circuit(10, 10, measure=True, conditional=True, reset=True, seed=42 + i)
)
qpy_file = io.BytesIO()
dump(circuits, qpy_file)
qpy_file.seek(0)
new_circs = load(qpy_file)
self.assertEqual(circuits, new_circs)
| 32.937956 | 100 | 0.603546 |
c66b5c089b1ef1413e83ff11e1f3090c27167078 | 6,120 | py | Python | src/fuzzy_potato/database/postgres.py | cezary986/fuzzy-potato | 11a7e74529d7ffe925aed6a4fd4e5979043d0a84 | ["MIT"] | null | null | null | src/fuzzy_potato/database/postgres.py | cezary986/fuzzy-potato | 11a7e74529d7ffe925aed6a4fd4e5979043d0a84 | ["MIT"] | null | null | null | src/fuzzy_potato/database/postgres.py | cezary986/fuzzy-potato | 11a7e74529d7ffe925aed6a4fd4e5979043d0a84 | ["MIT"] | null | null | null |
import sys
from typing import List
from fuzzy_potato.core import BaseStorage, SegmentData, WordData, GramData, TextData
import logging
import time
import psycopg2
from .sql import create_db_sql, delete_data_sql, insert_gram_sql, insert_word_sql, insert_segment_sql, begin_insert, \
end_insert, fuzzy_match_words, fuzzy_match_segments, match_word_for_segments, get_db_statistics
sys.path.append('..')
class DataBaseConnector:
def __init__(self):
self.connection = None
self.cursor = None
self.port = None
self.host = None
self.username = None
self.password = None
self.database_name = None
def connect(self, port: str, host: str, username: str, password: str, database_name: str):
self.port = port
self.host = host
self.username = username
self.password = password
self.database_name = database_name
self._connect_self()
def _connect_self(self):
try:
self.connection = psycopg2.connect(
user=self.username, password=self.password, host=self.host, port=self.port, database=self.database_name)
self.cursor = self.connection.cursor()
except psycopg2.Error as error:
logging.error('Error while connecting to PostgreSQL')
logging.error(error)
raise Exception('Error while connecting to PostgreSQL', str(error))
def disconnect(self):
if self.connection:
self.cursor.close()
self.connection.close()
logging.info("Closing PostgreSQL connection")
def execute_query(self, sql: str, fetch=False):
if self.connection.closed > 0:
self._connect_self()
try:
self.cursor.execute(sql)
self.connection.commit()
logging.debug('Query finished successfully')
if fetch:
return self.cursor.fetchall()
except psycopg2.DatabaseError as error:
logging.error('Error while running query: ' + sql)
self.cursor.execute("ROLLBACK")
self.connection.commit()
logging.error(error)
raise error
class PostgresStorage(BaseStorage):
def __init__(self, config):
self.db_connector = DataBaseConnector()
self.db_connector.connect(
port=config['port'],
host=config['host'],
username=config['username'],
password=config['password'],
database_name=config['database_name'],
)
def finish(self):
self.db_connector.disconnect()
def setup_database(self):
try:
self.db_connector.execute_query(create_db_sql)
except Exception as error:
logging.error('Failed to setup database tables')
logging.error(error)
raise error
def drop_database(self):
try:
self.db_connector.execute_query(delete_data_sql)
except Exception as error:
logging.error('Failed to drop database tables')
logging.error(error)
raise error
@staticmethod
def _save_word(word: WordData):
sql = insert_word_sql(word.text, word.position)
for key, gram in word.grams.items():
sql += insert_gram_sql(gram.text, gram.word_position)
return sql
def _save_segment(self, segment: SegmentData):
sql = begin_insert()
sql += insert_segment_sql(segment.text)
for word in segment.words:
sql += self._save_word(word)
sql += end_insert()
self.db_connector.execute_query(sql)
def save_data(self, data: TextData):
try:
maximum = len(data.segments)
for i, segment in enumerate(data.segments):
self._save_segment(segment)
logging.info('Indexing progress: ' + str((i / maximum) * 100) + '%')
except psycopg2.DatabaseError as error:
logging.error('Failed to save text data')
logging.error(error)
raise error
def match_grams_for_words(self, grams: List[GramData], limit=10) -> list:
try:
start_time = time.time()
result = self.db_connector.execute_query(
fuzzy_match_words(grams, limit), fetch=True)
logging.info("Query executed in: %s seconds" % (time.time() - start_time))
logging.info('Query matched')
return result
except psycopg2.DatabaseError as error:
logging.error('Failed to match query')
logging.error(error)
raise error
def match_grams_for_segments(self, grams: List[GramData], limit=10) -> list:
try:
result = self.db_connector.execute_query(fuzzy_match_segments(grams, limit), fetch=True)
logging.info('Query matched')
return result
except psycopg2.DatabaseError as error:
logging.error('Failed to match query')
logging.error(error)
raise error
def match_words_for_segments(self, words: List[WordData], limit=10) -> list:
try:
result = self.db_connector.execute_query(
match_word_for_segments(words, limit), fetch=True)
logging.info('Query matched')
return result
        except psycopg2.DatabaseError as error:
            logging.error('Failed to match query')
            logging.error(error)
            raise error
def get_db_statistics(self) -> dict:
try:
result = self.db_connector.execute_query(
get_db_statistics(), fetch=True)
return {
'gram_count': result[0][0],
'word_count': result[1][0],
'segment_count': result[2][0],
'gram_word_count': result[3][0],
'gram_segment_count': result[4][0],
'segment_word_count': result[5][0],
}
except psycopg2.DatabaseError as error:
logging.error('Failed to match query')
logging.error(error)
raise error
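# Illustrative usage sketch: the connection parameters below are placeholders,
# not values taken from this project.
if __name__ == '__main__':
    example_config = {
        'port': '5432',
        'host': 'localhost',
        'username': 'postgres',
        'password': 'postgres',
        'database_name': 'fuzzy_potato',
    }
    storage = PostgresStorage(example_config)
    storage.setup_database()
    print(storage.get_db_statistics())
    storage.finish()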
| 34.772727 | 120 | 0.604248 |
3b22d5d7c0b8aa3ab508be2a4f63da7d09f6b867 | 8,286 | py | Python | preprocess.py | lbda1/OpenNMT-py-AMR-to-text | b59347246fb330feff4139b677014554e8b9aad0 | ["MIT"] | 26 | 2019-03-29T07:52:43.000Z | 2021-12-29T02:41:31.000Z | preprocess.py | lbda1/OpenNMT-py-AMR-to-text | b59347246fb330feff4139b677014554e8b9aad0 | ["MIT"] | 1 | 2020-02-15T10:31:58.000Z | 2020-02-17T14:19:26.000Z | preprocess.py | lbda1/OpenNMT-py-AMR-to-text | b59347246fb330feff4139b677014554e8b9aad0 | ["MIT"] | 5 | 2019-06-30T19:57:46.000Z | 2021-04-01T07:56:53.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pre-process Data / features files and build vocabulary
"""
import argparse
import glob
import sys
import gc
import codecs
import torch
from onmt.utils.logging import init_logger, logger
import onmt.inputters as inputters
import onmt.opts as opts
def check_existing_pt_files(opt):
""" Checking if there are existing .pt files to avoid tampering """
# We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt
# when training, so check to avoid tampering with existing pt files
# or mixing them up.
for t in ['train', 'valid', 'vocab']:
pattern = opt.save_data + '.' + t + '*.pt'
if glob.glob(pattern):
sys.stderr.write("Please backup existing pt file: %s, "
"to avoid tampering!\n" % pattern)
sys.exit(1)
def parse_args():
""" Parsing arguments """
parser = argparse.ArgumentParser(
description='preprocess.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.preprocess_opts(parser)
opt = parser.parse_args()
torch.manual_seed(opt.seed)
check_existing_pt_files(opt)
return opt
def build_save_in_shards_using_shards_size(src_corpus, tgt_corpus, fields,
corpus_type, opt):
"""
    Divide src_corpus and tgt_corpus into multiple smaller
    src_corpus and tgt_corpus files, then build shards; each
    shard will have opt.shard_size samples except the last shard.
    The reason we do this is to avoid loading a huge corpus file
    into memory all at once.
"""
with codecs.open(src_corpus, "r", encoding="utf-8") as fsrc:
with codecs.open(tgt_corpus, "r", encoding="utf-8") as ftgt:
src_data = fsrc.readlines()
tgt_data = ftgt.readlines()
src_corpus = "".join(src_corpus.split(".")[:-1])
tgt_corpus = "".join(tgt_corpus.split(".")[:-1])
num_shards = int(len(src_data) / opt.shard_size)
for x in range(num_shards):
f = codecs.open(src_corpus + ".{0}.txt".format(x), "w",
encoding="utf-8")
f.writelines(
src_data[x * opt.shard_size: (x + 1) * opt.shard_size])
f.close()
f = codecs.open(tgt_corpus + ".{0}.txt".format(x), "w",
encoding="utf-8")
f.writelines(
tgt_data[x * opt.shard_size: (x + 1) * opt.shard_size])
f.close()
num_written = num_shards * opt.shard_size
if len(src_data) > num_written:
f = codecs.open(src_corpus + ".{0}.txt".format(num_shards),
'w', encoding="utf-8")
f.writelines(
src_data[num_shards * opt.shard_size:])
f.close()
f = codecs.open(tgt_corpus + ".{0}.txt".format(num_shards),
'w', encoding="utf-8")
f.writelines(
tgt_data[num_shards * opt.shard_size:])
f.close()
src_list = sorted(glob.glob(src_corpus + '.*.txt'))
tgt_list = sorted(glob.glob(tgt_corpus + '.*.txt'))
ret_list = []
for index, src in enumerate(src_list):
dataset = inputters.build_dataset(
fields, opt.data_type,
src_path=src,
tgt_path=tgt_list[index],
src_dir=opt.src_dir,
src_seq_length=opt.src_seq_length,
tgt_seq_length=opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict,
sample_rate=opt.sample_rate,
window_size=opt.window_size,
window_stride=opt.window_stride,
window=opt.window,
image_channel_size=opt.image_channel_size,
reentrancies=opt.reentrancies)
pt_file = "{:s}.{:s}.{:d}.pt".format(
opt.save_data, corpus_type, index)
        # We save fields in vocab.pt separately, so make it empty.
dataset.fields = []
logger.info(" * saving %sth %s data shard to %s."
% (index, corpus_type, pt_file))
torch.save(dataset, pt_file)
ret_list.append(pt_file)
del dataset.examples
gc.collect()
del dataset
gc.collect()
return ret_list
def build_save_dataset(corpus_type, fields, opt):
""" Building and saving the dataset """
assert corpus_type in ['train', 'valid']
if corpus_type == 'train':
src_corpus = opt.train_src
tgt_corpus = opt.train_tgt
else:
src_corpus = opt.valid_src
tgt_corpus = opt.valid_tgt
if (opt.shard_size > 0):
return build_save_in_shards_using_shards_size(src_corpus,
tgt_corpus,
fields,
corpus_type,
opt)
# For data_type == 'img' or 'audio', currently we don't do
# preprocess sharding. We only build a monolithic dataset.
# But since the interfaces are uniform, it would be not hard
# to do this should users need this feature.
dataset = inputters.build_dataset(
fields, opt.data_type,
src_path=src_corpus,
tgt_path=tgt_corpus,
src_dir=opt.src_dir,
src_seq_length=opt.src_seq_length,
tgt_seq_length=opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict,
sample_rate=opt.sample_rate,
window_size=opt.window_size,
window_stride=opt.window_stride,
window=opt.window,
image_channel_size=opt.image_channel_size,
reentrancies=opt.reentrancies)
    # We save fields in vocab.pt separately, so make it empty.
dataset.fields = []
pt_file = "{:s}.{:s}.pt".format(opt.save_data, corpus_type)
logger.info(" * saving %s dataset to %s." % (corpus_type, pt_file))
torch.save(dataset, pt_file)
return [pt_file]
def build_save_vocab(train_dataset, fields, opt):
""" Building and saving the vocab """
fields = inputters.build_vocab(train_dataset, fields, opt.data_type,
opt.share_vocab,
opt.src_vocab,
opt.src_vocab_size,
opt.src_words_min_frequency,
opt.tgt_vocab,
opt.tgt_vocab_size,
opt.tgt_words_min_frequency)
# Can't save fields, so remove/reconstruct at training time.
vocab_file = opt.save_data + '.vocab.pt'
torch.save(inputters.save_fields_to_vocab(fields), vocab_file)
def main():
opt = parse_args()
if (opt.max_shard_size > 0):
raise AssertionError("-max_shard_size is deprecated, please use \
-shard_size (number of examples) instead.")
init_logger(opt.log_file)
logger.info("Extracting features...")
src_nfeats = inputters.get_num_features(
opt.data_type, opt.train_src, 'src')
tgt_nfeats = inputters.get_num_features(
opt.data_type, opt.train_tgt, 'tgt')
logger.info(" * number of source features: %d." % src_nfeats)
logger.info(" * number of target features: %d." % tgt_nfeats)
logger.info("Building `Fields` object...")
fields = inputters.get_fields(opt.data_type, src_nfeats, tgt_nfeats)
logger.info("Building & saving training data...")
train_dataset_files = build_save_dataset('train', fields, opt)
logger.info("Building & saving validation data...")
valid_dataset_files = build_save_dataset('valid', fields, opt)
logger.info("Building & saving vocabulary...")
build_save_vocab(train_dataset_files + valid_dataset_files, fields, opt)
if __name__ == "__main__":
main()
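# Usage note (illustrative, paths are placeholders): a typical invocation would
# look roughly like
#   python preprocess.py -train_src data/src-train.txt -train_tgt data/tgt-train.txt \
#       -valid_src data/src-val.txt -valid_tgt data/tgt-val.txt \
#       -save_data data/demo -shard_size 100000
# the exact flag set is defined by onmt.opts.preprocess_opts.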
| 35.562232 | 79 | 0.585687 |
dc116ea2c4d8027a75206414f2b79ae657e3d3e1 | 3,846 | py | Python | experiments/ashvin/icml2020/hand/sparse/bc_l2_sweep1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | ["MIT"] | null | null | null | experiments/ashvin/icml2020/hand/sparse/bc_l2_sweep1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | ["MIT"] | null | null | null | experiments/ashvin/icml2020/hand/sparse/bc_l2_sweep1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | ["MIT"] | null | null | null |
"""
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_stacked_path_loader import DictToMDPStackedPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
if __name__ == "__main__":
variant = dict(
num_epochs=501,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
# policy_class=GaussianMixturePolicy,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256, ],
std_architecture="values",
max_log_std=0,
min_log_std=-6,
# num_gaussians=10,
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=True,
alpha=0,
compute_bc=True,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=0,
policy_weight_decay=1e-4,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=False,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
),
num_exps_per_instance=1,
region='us-west-1',
path_loader_class=DictToMDPStackedPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
stack_obs=1,
),
add_env_demos=True,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
# save_paths=True,
)
search_space = {
'env': ["relocate-sparse-v0", "hammer-sparse-v0", "door-sparse-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [1, ],
'trainer_kwargs.use_automatic_entropy_tuning': [False],
# 'policy_kwargs.max_log_std': [0, ],
# 'policy_kwargs.min_log_std': [-6, ],
# 'policy_kwargs.std': [0.01, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [0.0],
'trainer_kwargs.bc_weight': [1.0, ],
'trainer_kwargs.policy_weight_decay': [1e-3, 1e-4, 1e-5, ],
'trainer_kwargs.policy_lr': [1e-3, 3e-4, 1e-4],
# 'policy_kwargs.num_gaussians': [1, 3, 10, 30],
'path_loader_kwargs.stack_obs': [1, ]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, run_id=0)
| 30.768 | 89 | 0.568383 |
564f42379009f7cf4f834832d5878c55213cbfe7 | 5,419 | py | Python | parser/team19/BDTytus/Gramaticas/Descendente/gramatica.py | strickergt128/tytus | 93216dd9481ea0775da1d2967dc27be66872537f | ["MIT"] | null | null | null | parser/team19/BDTytus/Gramaticas/Descendente/gramatica.py | strickergt128/tytus | 93216dd9481ea0775da1d2967dc27be66872537f | ["MIT"] | null | null | null | parser/team19/BDTytus/Gramaticas/Descendente/gramatica.py | strickergt128/tytus | 93216dd9481ea0775da1d2967dc27be66872537f | ["MIT"] | null | null | null |
# LEXICAL ANALYZER
#----------------------------------------------------------------------------------------
reservadas = {
'add' : 'ADD',
'all' : 'ALL',
'alter' :'ALTER',
'rand' : 'AND',
'as' : 'AS',
'asc':'ASC',
'between' : 'BETWEEN',
'by' : 'BY',
'case' : 'CASE',
'check' : 'CHECK',
'column' : 'COLUMN',
'constraint' : 'CONSTRAINT',
'create' : 'CREATE',
'current' : 'CURRENT_SESSION',
'database' : 'DATABASE',
'databases' : 'DATABASES',
'delete' : 'DELETE',
'desc' : 'DESC',
'distinct' : 'DISTINCT',
'drop' : 'DROP',
'else' : 'ELSE',
'end' : 'END',
'enum' : 'ENUM',
'except' : 'EXCEPT',
'exists' : 'EXISTS',
'false' : 'FALSE',
'first' : 'FIRST',
'foreign' : 'FOREIGN',
'from' : 'FROM',
'full' : 'FULL',
'greatest' : 'GREATEST',
'group' : 'GROUP',
'having' : 'HAVING',
'if' : 'IF',
'in' : 'IN',
'inherits' : 'INHERITS',
'inner' : 'INNER',
'intersect' : 'INTERSECT',
'into' : 'INTO',
'is' : 'IS',
'isnull' : 'ISNULL',
'join': 'JOIN',
'key': 'KEY',
'last': 'LAST',
'least': 'LEAST',
'left': 'LEFT',
'like': 'LIKE',
'limit': 'LIMIT',
'mode': 'MODE',
'natural': 'NATURAL',
'not': 'NOT',
'notnull': 'NOTNULL',
'null': 'NULL',
'nulls': 'NULLS',
'offset': 'OFFSET',
'on': 'ON',
'or': 'OR',
'order': 'ORDER',
'outer': 'OUTER',
'owner': 'OWNER',
'primary': 'PRIMARY',
'references': 'REFERENCES',
'rename': 'RENAME',
'replace': 'REPLACE',
'returning': 'RETURNING',
'right': 'RIGHT',
'select': 'SELECT',
'session_user': 'SESSION_USER',
'set': 'SET',
'show': 'SHOW',
'symmetric': 'SYMMETRIC',
'table': 'TABLE',
'then': 'THEN',
'true': 'TRUE',
'type': 'TYPE',
'union': 'UNION',
'unique': 'UNIQUE',
'unknow': 'UNKNOW',
'update': 'UPDATE',
'values': 'VALUES',
'when': 'WHEN',
'where': 'WHERE',
'yes': 'YES', #EXTRAS
'no': 'NO',
'off': 'OFF'
}
tokens = [
'PARENT_D',
'PARENT_I',
'LLAVE_ABRE',
'LLAVE_CIERRE',
'COMA',
'P_COMA',
'PUNTO',
'MAS',
'MENOS',
'AND',
'CONCATENACION',
'XOR',
'NOT_SIMBOLO',
'POTENCIA',
'POR',
'DIVISION',
'ENTERO',
'DECIMAL',
'CARACTER',
'CADENA',
'TYPECAST',
'MODULO',
'ORSIGNO',
'SHIFTLEFT',
'SHIFTRIGHT',
'MAYORQUE',
'MENORQUE',
'MAYORIGUAL',
'MENORIGUAL',
'IGUAL',
'DISTINTO',
'DIFERENTE',
'CORABRE',
'CORCIERRE',
'ID',
'BINARIO'
] + list(reservadas.values())
# Tokens
t_PARENT_D = r'\('
t_PARENT_I = r'\)'
t_LLAVE_ABRE = r'\{'
t_LLAVE_CIERRE = r'\}'
t_COMA = r','
t_P_COMA = r';'
t_PUNTO = r'\.'
t_MAS = r'\+'
t_MENOS = r'-'
t_AND = r'&'
t_CONCATENACION = r'\|\|'
t_XOR = r'\#'
t_NOT_SIMBOLO = r'~'
t_POTENCIA = r'\^'
t_POR = r'\*'
t_DIVISION = r'/'
t_TYPECAST = r'[:]{2}'
t_MODULO = r'%'
t_ORSIGNO = r'[|]'
t_SHIFTLEFT = r'<<'
t_SHIFTRIGHT = r'>>'
t_MAYORIGUAL = r'>='
t_MENORIGUAL = r'<='
t_DISTINTO = r'<>'
t_MAYORQUE = r'>'
t_MENORQUE = r'<'
t_IGUAL = r'='
t_DIFERENTE = r'!='
t_CORABRE = r'\['
t_CORCIERRE = r']'
# t_DECIMAL is defined before t_ENTERO so that PLY tries the longer pattern first;
# function rules are matched in definition order, so the previous order split a
# literal such as 3.14 into two ENTERO tokens.
def t_DECIMAL(t):
    r'\d+\.\d+'
    try:
        t.value = float(t.value)
    except ValueError:
        print("Float value too large %d", t.value)
        t.value = 0
    return t
def t_ENTERO(t):
    r'\d+'
    try:
        t.value = int(t.value)
    except ValueError:
        print("Integer value too large %d", t.value)
        t.value = 0
    return t
def t_CARACTER(t):
r'\'.?\''
    t.value = t.value[1:-1] # strip the single quotes
return t
def t_CADENA(t):
r'\".*?\"'
    t.value = t.value[1:-1] # strip the double quotes
return t
def t_ID(t):
r'[a-zA-Z_][a-zA-Z0-9_]*'
t.type = reservadas.get(t.value.lower(), 'ID')
return t
def t_COMMENTLINE(t):
r'-{2}[^\n]*(\n|\Z)'
t.lexer.lineno += 1
print("Comentario de una linea leido: "+ t.value[2:])
def t_COMMENTMULTI(t):
r'[/][*][^*]*[*]+([^/*][^*]*[*]+)*[/]'
t.lexer.lineno += t.value.count('\n')
print("Comentario multilinea leido: "+ t.value[2:-2])
def t_BINARIO(t):
r'B\'[0-1]+\''
    t.value = t.value[2:-1] # strip the quotes and the leading B
return t
# Ignored characters
t_ignore = " \t"
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(t):
print("Carácter ilegal '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexical analyzer
import ply.lex as lex
lexer = lex.lex()
# SYNTACTIC ANALYZER
#----------------------------------------------------------------------------------------
def p_expresiones_evaluar(t):
'''expresiones : expresion expresiones_prima '''
def p_expresiones_prima(t):
'''expresiones_prima : expresion expresiones_prima
| '''
def p_expresion_evaluar(t):
'''expresion : select_expresion '''
def p_enumerated_type(t):
'select_expresion : SELECT POR FROM ID '
def p_error(t):
print("Error sintáctico en " + str(t.value) + ", Fila: " + str(t.lexer.lineno))
# Build the parser
import ply.yacc as yacc
parser = yacc.yacc()
f = open("./entrada.txt", "r")
input = f.read()
parser.parse(input)
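# Usage note (illustrative, the table name is a placeholder): with the grammar
# above, entrada.txt holds one or more statements of the only supported form, e.g.
#   SELECT * FROM clientes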
| 21.589641 | 89 | 0.498062 |
fc8c3db3bf355966288084c5b82279b85192d38d | 4,568 | py | Python | Muta3DMaps/core/Mods/ProcessI3D.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | ["MIT"] | null | null | null | Muta3DMaps/core/Mods/ProcessI3D.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | ["MIT"] | null | null | null | Muta3DMaps/core/Mods/ProcessI3D.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | ["MIT"] | null | null | null |
# @Date: 2019-08-16T23:34:20+08:00
# @Email: 1730416009@stu.suda.edu.cn
# @Filename: ProcessI3D.py
# @Last modified time: 2019-11-25T21:23:31+08:00
import pandas as pd
import wget, time, os
from urllib import request
from retrying import retry
from multiprocessing.dummy import Pool
from xml.etree import ElementTree
from ..Utils.Logger import RunningLogger
from ..Utils.FileIO import file_o
class RetrieveI3D:
# Reference: https://interactome3d.irbbarcelona.org/help.php#restful
CONFIG = {
'INTERACTION_SIFTS_COL': ['i3d_TYPE', 'pdb_id', 'i3d_BIO_UNIT',
'i3d_FILENAME', 'i3d_SAME_MODEL',
'i3d_CHAIN_COMPO', 'i3d_pdb_type',
'i3d_INTERACT_COMPO', 'Entry',
'chain_id', 'i3d_MODEL',
'i3d_SEQ_IDENT', 'i3d_COVERAGE',
'i3d_DOMAIN', 'i3d_model_len', 'i3d_model_range'
],
'DOWNLOAD_URL': 'https://interactome3d.irbbarcelona.org/api/%s?', # %s=%s&%s=%s
}
def __init__(self, **kwargs):
self.downloadFolder = kwargs["downloadFolder"]
self.Logger = RunningLogger("RetrieveI3D", kwargs["loggingPath"])
def get_interactions_meta(self, species='human', struct_type=None, filePath=None, related_unp=None, related_pdb=None, outputPath=None):
if filePath is None:
url = "https://interactome3d.irbbarcelona.org/user_data/%s/download/complete/interactions.dat" % species
filePath = os.path.join(self.downloadFolder, 'I3D_META_interactions_%s_%s.dat' % (species, time.strftime("%Y_%m_%d", time.localtime())))
self.Logger.logger.info("Downloading File: %s" % filePath)
wget.download(url, out=filePath)
self.Logger.logger.info("\n")
dfrm = pd.read_csv(filePath, sep='\t', na_values=["-"])
if struct_type is not None:
dfrm = dfrm[dfrm['TYPE'] == struct_type].reset_index(drop=True)
dfrm["i3d_SAME_MODEL"] = dfrm.apply(lambda x: x["MODEL1"] == x["MODEL2"], axis=1)
dfrm['i3d_CHAIN_COMPO'] = dfrm.apply(lambda x: '%s_%s' % (x['CHAIN1'], x['CHAIN2']), axis=1)
dfrm["i3d_pdb_type"] = dfrm.apply(lambda x: "ho" if x['PROT1'] == x['PROT2'] else "he", axis=1)
dfrm['i3d_INTERACT_COMPO'] = dfrm.apply(lambda x: '%s_%s' % tuple(sorted([x['PROT1'], x['PROT2']])), axis=1) # Need to check
dfrm['PDB_ID'] = dfrm.apply(lambda x: x['PDB_ID'].upper(), axis=1)
common_cols = ['TYPE', 'PDB_ID', 'BIO_UNIT', 'FILENAME', 'i3d_SAME_MODEL', 'i3d_CHAIN_COMPO', 'i3d_pdb_type', 'i3d_INTERACT_COMPO']
s_cols = ['PROT', 'CHAIN', 'MODEL', 'SEQ_IDENT', 'COVERAGE', 'SEQ_BEGIN', 'SEQ_END', 'DOMAIN']
get_s_cols = lambda num: ['%s%s' % (i, num) for i in s_cols]
df1, df2 = dfrm[common_cols+get_s_cols(1)].copy(), dfrm[common_cols+get_s_cols(2)].copy()
df1.columns, df2.columns = common_cols+s_cols, common_cols+s_cols
df12 = pd.concat([df1, df2]).reset_index(drop=True)
df12['i3d_model_len'] = df12.apply(lambda x: x['SEQ_END'] - x['SEQ_BEGIN'] + 1, axis=1)
df12['i3d_model_range'] = df12.apply(lambda x: '[[%d, %d]]' % (x['SEQ_BEGIN'], x['SEQ_END']), axis=1)
if related_unp is not None:
df12 = df12[df12['PROT'].isin(related_unp)]
if related_pdb is not None:
df12 = df12[df12['PDB_ID'].isin(related_pdb)]
df12.drop(columns=['SEQ_BEGIN', 'SEQ_END'], inplace=True)
self.Logger.logger.warning("Raw Order of I3D Column Names: " + str(df12.columns))
df12.columns = self.CONFIG['INTERACTION_SIFTS_COL']
file_o(outputPath, df12)
return df12
@retry(stop_max_attempt_number=3, wait_fixed=1000)
def download_pdb_from_Interactome3D(self, filename, type='interaction'):
url = RetrieveI3D.CONFIG['DOWNLOAD_URL'] % 'getPdbFile' + 'filename=%s&type=%s' % (filename, type)
xmlPage = request.urlopen(url).read()
xmlPage = xmlPage.decode('utf-8')
node = ElementTree.XML(xmlPage)
with open(os.path.join(self.downloadFolder, filename), 'w') as fw:
fw.write(node[0][0][1].text)
time.sleep(2)
    def download_model_script(self, fileName_list, chunksize=100):
        for i in range(0, len(fileName_list), chunksize):
            chunk_li = fileName_list[i:i+chunksize]
            pool = Pool(processes=20)
            pool.map(self.download_pdb_from_Interactome3D, chunk_li)
            pool.close()
            pool.join()
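# Illustrative usage sketch: the folder paths, structure type and slice below are
# placeholders, not values taken from this project.
if __name__ == "__main__":
    retriever = RetrieveI3D(downloadFolder="./i3d_downloads",
                            loggingPath="./i3d_downloads/run.log")
    meta_df = retriever.get_interactions_meta(species='human', struct_type='Structure')
    retriever.download_model_script(list(meta_df['i3d_FILENAME'].unique())[:5])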
| 49.652174 | 148 | 0.616243 |
8c876eecc08108ee97c29e4a222f00f7c6df18b9 | 6,438 | py | Python | webapi/tct-security-tcs-tests/inst.xpk.py | linshen/crosswalk-test-suite | e206b2c35fc09e583f3202fc7fc8a656c8e2b5de | ["BSD-3-Clause"] | null | null | null | webapi/tct-security-tcs-tests/inst.xpk.py | linshen/crosswalk-test-suite | e206b2c35fc09e583f3202fc7fc8a656c8e2b5de | ["BSD-3-Clause"] | null | null | null | webapi/tct-security-tcs-tests/inst.xpk.py | linshen/crosswalk-test-suite | e206b2c35fc09e583f3202fc7fc8a656c8e2b5de | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user )
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
pkg_infos = line.split()
if len(pkg_infos) == 4:
continue
name = pkg_infos[5]
name = name.lstrip('[').rstrip(']')
print "name is: %s" % name
if pkg_name == name:
test_pkg_id = pkg_infos[3]
test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
print test_pkg_id
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".xpk"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t xpk -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".xpk"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0 :
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
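# Usage note (illustrative, <device-id> is a placeholder):
#   ./inst.xpk.py -m SDB -s <device-id> -a app -i    # install the .xpk packages
#   ./inst.xpk.py -m SDB -s <device-id> -a app -u    # uninstall them again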
| 29.805556 | 106 | 0.559491 |
cc55177840ffd09963c3c065b4c3adc3219f3e5c | 1,453 | py | Python | examples/quickstart/show_config.py | mvdoc/pycortex | bc8a93cac9518e3c1cd89650c703f9f3814e805b | ["BSD-2-Clause"] | 423 | 2015-01-06T02:46:46.000Z | 2022-03-23T17:20:38.000Z | examples/quickstart/show_config.py | mvdoc/pycortex | bc8a93cac9518e3c1cd89650c703f9f3814e805b | ["BSD-2-Clause"] | 243 | 2015-01-03T02:10:03.000Z | 2022-03-31T19:29:48.000Z | examples/quickstart/show_config.py | mvdoc/pycortex | bc8a93cac9518e3c1cd89650c703f9f3814e805b | ["BSD-2-Clause"] | 136 | 2015-03-23T20:35:59.000Z | 2022-03-09T13:39:10.000Z |
"""
=====================================================
Finding out where the config and filestore are
=====================================================
Easily locating your config file and filestore locations.
This comes in useful when things don't work because the config file is not set correctly.
"""
from __future__ import print_function
import cortex
from cortex.options import config
##########################################################
# Finding where your config file is.
print(cortex.options.usercfg)
##########################################################
# Finding where the current filestore is.
# Useful for when your subjects don't show up in cortex.db, and all you have is S1.
print(config.get('basic', 'filestore'))
##########################################################
# Finding where pycortex is looking for colormaps.
# Useful for when you get color map not found messages.
print(config.get('webgl', 'colormaps'))
##########################################################
# To look at your config file, it is recommended that you open it with a text editor.
# However, you *can* still look at options from within pycortex.
# sections gets the upper-level sections in the config file
sections = config.sections()
print(sections)
# items gets the option items within a section as a list of key-value pairs.
basic_config = config.items('paths_default')
print(basic_config)
| 39.27027 | 90 | 0.572608 |
718ae37a2d0621d4bb9413b03e79621ab4169923 | 2,955 | py | Python | mbta/models.py | MAPC/bostonparks | 5efa651736bd9cd7c42372922c50286ff2a61262 | ["BSD-3-Clause"] | null | null | null | mbta/models.py | MAPC/bostonparks | 5efa651736bd9cd7c42372922c50286ff2a61262 | ["BSD-3-Clause"] | 1 | 2015-05-01T01:30:49.000Z | 2015-05-01T01:30:49.000Z | mbta/models.py | MAPC/bostonparks | 5efa651736bd9cd7c42372922c50286ff2a61262 | ["BSD-3-Clause"] | 2 | 2015-05-01T01:31:38.000Z | 2019-10-28T05:20:16.000Z |
from django.contrib.gis.db import models
# Create your models here.
class MBTAStop(models.Model): # Stops.csv
stop_id = models.CharField(max_length=256)
stop_code = models.CharField(max_length=256)
stop_name = models.CharField(max_length=256)
stop_desc = models.CharField(max_length=256)
zone_id = models.CharField(max_length=256)
stop_url = models.TextField()
location_type = models.CharField(max_length=256)
parent_station = models.CharField(max_length=256)
lat_long = models.PointField(blank=True, null=True,srid=26986)
objects = models.GeoManager() # required for anything special like __distance_gt
# ALL FILES LOCATED IN /home/django/MBTA_DATA
class MBTAAgency(models.Model):
agency_id = models.CharField(max_length=256)
agency_name = models.CharField(max_length=256)
agency_url = models.TextField()
agency_timezone = models.CharField(max_length=256)
agency_lang = models.CharField(max_length=2)
agency_phone = models.CharField(max_length=16)
#
class MBTACalendar(models.Model):
service_id = models.CharField(max_length=256)
monday = models.BooleanField(default=False)
tuesday = models.BooleanField(default=False)
wednesday = models.BooleanField(default=False)
thursday = models.BooleanField(default=False)
friday = models.BooleanField(default=False)
saturday = models.BooleanField(default=False)
sunday = models.BooleanField(default=False)
start_date = models.DateField()
end_date = models.DateField()
#
#class MBTACalendarDate(models.Model):
# service_id
# date
# exception_type
#
class MBTACFeedInfo(models.Model):
feed_publisher_name = models.CharField(max_length=256)
feed_publisher_url = models.TextField()
feed_lang = models.CharField(max_length=2)
feed_start_date = models.DateField()
feed_end_date = models.DateField()
feed_version = models.CharField(max_length=256)
#
class MBTAFrequencies(models.Model):
trip_id = models.CharField(max_length=256)
start_time = models.TimeField()
end_time = models.TimeField()
headway_secs = models.IntegerField()
#
#class MBTARoutes(models.Model):
# route_id = models.CharField(max_length=256)
# agency_id = models.CharField(max_length=256)
# route_short_name
# route_long_name
# route_desc
# route_type
# route_url
# route_color
# route_text_color
#
#class MBTAShapes(models.Model):
# shape_id
# shape_pt_lat
# shape_pt_lon
# shape_pt_sequence
# shape_dist_traveled
#
#class MBTAStopTimes(models.Model):
# trip_id
# arrival_time
# departure_time
# stop_id
# stop_sequence
# stop_headsign
# pickup_type
# drop_off_type
#
#class MBTATransfers(models.Model):
# from_stop_id
# to_stop_id
# transfer_type
# min_transfer_time
#
#class MBTATrips(models.Model):
# route_id
# service_id
# trip_id
# trip_headsign
# direction_id
# block_id
# shape_id
| 28.68932
| 86
| 0.731641
|
c2f821f3bf147e3783a05613f188203e201c157b
| 19,827
|
py
|
Python
|
mergify_engine/tests/unit/test_worker.py
|
GuillaumeOj/mergify-engine
|
c11c05d2fa7db8e7195c57147f1c6a845009a718
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/tests/unit/test_worker.py
|
GuillaumeOj/mergify-engine
|
c11c05d2fa7db8e7195c57147f1c6a845009a718
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/tests/unit/test_worker.py
|
GuillaumeOj/mergify-engine
|
c11c05d2fa7db8e7195c57147f1c6a845009a718
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import asyncio
import sys
import time
from unittest import mock
from freezegun import freeze_time
import httpx
import pytest
from mergify_engine import exceptions
from mergify_engine import logs
from mergify_engine import utils
from mergify_engine import worker
from mergify_engine.clients import http
if sys.version_info < (3, 8):
# https://github.com/pytest-dev/pytest-asyncio/issues/69
pytest.skip(
"mock + pytest-asyncio requires python3.8 or higher",
allow_module_level=True,
)
@pytest.fixture()
async def redis():
r = await utils.create_aredis_for_stream()
await r.flushdb()
try:
yield r
finally:
await r.flushdb()
r.connection_pool.disconnect()
async def run_worker():
w = worker.Worker()
w.start()
timeout = 10
started_at = time.monotonic()
while (
w._redis is None or (await w._redis.zcard("streams")) > 0
) and time.monotonic() - started_at < timeout:
await asyncio.sleep(0.5)
w.stop()
await w.wait_shutdown_complete()
fake_subscription = {}
@pytest.mark.asyncio
@mock.patch("mergify_engine.worker.run_engine")
async def test_worker_with_waiting_tasks(run_engine, redis, logger_checker):
stream_names = []
for installation_id in range(8):
for pull_number in range(2):
for data in range(3):
owner = f"owner-{installation_id}"
repo = f"repo-{installation_id}"
stream_names.append(f"stream~owner-{installation_id}")
await worker.push(
redis,
owner,
repo,
pull_number,
"pull_request",
{"payload": data},
)
# Check everything we push are in redis
assert 8 == (await redis.zcard("streams"))
assert 8 == len(await redis.keys("stream~*"))
for stream_name in stream_names:
assert 6 == (await redis.xlen(stream_name))
await run_worker()
# Check redis is empty
assert 0 == (await redis.zcard("streams"))
assert 0 == len(await redis.keys("stream~*"))
assert 0 == len(await redis.hgetall("attempts"))
# Check engine have been run with expect data
assert 16 == len(run_engine.mock_calls)
assert (
mock.call(
"owner-0",
"repo-0",
0,
[
{"event_type": "pull_request", "data": {"payload": 0}},
{"event_type": "pull_request", "data": {"payload": 1}},
{"event_type": "pull_request", "data": {"payload": 2}},
],
)
in run_engine.mock_calls
)
@pytest.mark.asyncio
@mock.patch("mergify_engine.worker.run_engine")
@mock.patch("mergify_engine.clients.github.aget_client")
@mock.patch("mergify_engine.github_events.extract_pull_numbers_from_event")
async def test_worker_expanded_events(
extract_pull_numbers_from_event,
aget_client,
run_engine,
redis,
logger_checker,
):
client = mock.Mock(
name="foo",
owner="owner",
repo="repo",
auth=mock.Mock(installation={"id": 12345}, owner="owner", repo="repo"),
)
client.__aenter__ = mock.AsyncMock(return_value=client)
client.__aexit__ = mock.AsyncMock()
client.items.return_value = mock.AsyncMock()
aget_client.return_value = client
extract_pull_numbers_from_event.return_value = [123, 456, 789]
await worker.push(
redis,
"owner",
"repo",
123,
"pull_request",
{"payload": "whatever"},
)
await worker.push(
redis,
"owner",
"repo",
None,
"comment",
{"payload": "foobar"},
)
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 2 == (await redis.xlen("stream~owner"))
await run_worker()
# Check redis is empty
assert 0 == (await redis.zcard("streams"))
assert 0 == len(await redis.keys("stream~*"))
assert 0 == len(await redis.hgetall("attempts"))
# Check engine have been run with expect data
assert 3 == len(run_engine.mock_calls)
assert run_engine.mock_calls[0] == mock.call(
"owner",
"repo",
123,
[
{"event_type": "pull_request", "data": {"payload": "whatever"}},
{"event_type": "comment", "data": {"payload": "foobar"}},
],
)
assert run_engine.mock_calls[1] == mock.call(
"owner",
"repo",
456,
[
{"event_type": "comment", "data": {"payload": "foobar"}},
],
)
assert run_engine.mock_calls[2] == mock.call(
"owner",
"repo",
789,
[
{"event_type": "comment", "data": {"payload": "foobar"}},
],
)
@pytest.mark.asyncio
@mock.patch("mergify_engine.worker.run_engine")
async def test_worker_with_one_task(run_engine, redis, logger_checker):
await worker.push(
redis,
"owner",
"repo",
123,
"pull_request",
{"payload": "whatever"},
)
await worker.push(
redis,
"owner",
"repo",
123,
"comment",
{"payload": "foobar"},
)
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 2 == (await redis.xlen("stream~owner"))
await run_worker()
# Check redis is empty
assert 0 == (await redis.zcard("streams"))
assert 0 == len(await redis.keys("stream~*"))
assert 0 == len(await redis.hgetall("attempts"))
# Check engine have been run with expect data
assert 1 == len(run_engine.mock_calls)
assert run_engine.mock_calls[0] == mock.call(
"owner",
"repo",
123,
[
{"event_type": "pull_request", "data": {"payload": "whatever"}},
{"event_type": "comment", "data": {"payload": "foobar"}},
],
)
@pytest.mark.asyncio
@mock.patch("mergify_engine.worker.run_engine")
async def test_consume_unexisting_stream(run_engine, redis, logger_checker):
p = worker.StreamProcessor(redis)
await p.consume("stream~notexists")
assert len(run_engine.mock_calls) == 0
@pytest.mark.asyncio
@mock.patch("mergify_engine.worker.run_engine")
async def test_consume_good_stream(run_engine, redis, logger_checker):
await worker.push(
redis,
"owner",
"repo",
123,
"pull_request",
{"payload": "whatever"},
)
await worker.push(
redis,
"owner",
"repo",
123,
"comment",
{"payload": "foobar"},
)
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 2 == await redis.xlen("stream~owner")
assert 0 == len(await redis.hgetall("attempts"))
p = worker.StreamProcessor(redis)
await p.consume("stream~owner")
assert len(run_engine.mock_calls) == 1
assert run_engine.mock_calls[0] == mock.call(
"owner",
"repo",
123,
[
{"event_type": "pull_request", "data": {"payload": "whatever"}},
{"event_type": "comment", "data": {"payload": "foobar"}},
],
)
# Check redis is empty
assert 0 == (await redis.zcard("streams"))
assert 0 == len(await redis.keys("stream~*"))
assert 0 == len(await redis.hgetall("attempts"))
@pytest.mark.asyncio
@mock.patch("mergify_engine.worker.daiquiri.getLogger")
@mock.patch("mergify_engine.worker.run_engine")
async def test_stream_processor_retrying_pull(run_engine, logger_class, redis):
logs.setup_logging()
logger = logger_class.return_value
# One retries once, the other reaches max_retry
run_engine.side_effect = [
exceptions.MergeableStateUnknown(mock.Mock()),
exceptions.MergeableStateUnknown(mock.Mock()),
mock.Mock(),
exceptions.MergeableStateUnknown(mock.Mock()),
exceptions.MergeableStateUnknown(mock.Mock()),
]
await worker.push(
redis,
"owner",
"repo",
123,
"pull_request",
{"payload": "whatever"},
)
await worker.push(
redis,
"owner",
"repo",
42,
"comment",
{"payload": "foobar"},
)
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 2 == await redis.xlen("stream~owner")
assert 0 == len(await redis.hgetall("attempts"))
p = worker.StreamProcessor(redis)
await p.consume("stream~owner")
assert len(run_engine.mock_calls) == 2
assert run_engine.mock_calls == [
mock.call(
"owner",
"repo",
123,
[
{"event_type": "pull_request", "data": {"payload": "whatever"}},
],
),
mock.call(
"owner",
"repo",
42,
[
{"event_type": "comment", "data": {"payload": "foobar"}},
],
),
]
# Check stream still there and attempts recorded
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert {
b"pull~owner~repo~42": b"1",
b"pull~owner~repo~123": b"1",
} == await redis.hgetall("attempts")
await p.consume("stream~owner")
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 1 == len(await redis.hgetall("attempts"))
assert len(run_engine.mock_calls) == 4
assert {b"pull~owner~repo~42": b"2"} == await redis.hgetall("attempts")
await p.consume("stream~owner")
assert len(run_engine.mock_calls) == 5
# Too many retries, everything is gone
assert 3 == len(logger.info.mock_calls)
assert 1 == len(logger.error.mock_calls)
assert logger.info.mock_calls[0].args == (
"failed to process pull request, retrying",
)
assert logger.info.mock_calls[1].args == (
"failed to process pull request, retrying",
)
assert logger.error.mock_calls[0].args == (
"failed to process pull request, abandoning",
)
assert 0 == (await redis.zcard("streams"))
assert 0 == len(await redis.keys("stream~*"))
assert 0 == len(await redis.hgetall("attempts"))
@pytest.mark.asyncio
@mock.patch.object(worker, "LOG")
@mock.patch("mergify_engine.worker.run_engine")
async def test_stream_processor_retrying_stream_recovered(run_engine, logger, redis):
logs.setup_logging()
response = mock.Mock()
response.json.return_value = {"message": "boom"}
response.status_code = 401
run_engine.side_effect = http.HTTPClientSideError(
message="foobar", request=response.request, response=response
)
await worker.push(
redis,
"owner",
"repo",
123,
"pull_request",
{"payload": "whatever"},
)
await worker.push(
redis,
"owner",
"repo",
123,
"comment",
{"payload": "foobar"},
)
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 2 == await redis.xlen("stream~owner")
assert 0 == len(await redis.hgetall("attempts"))
p = worker.StreamProcessor(redis)
await p.consume("stream~owner")
assert len(run_engine.mock_calls) == 1
assert run_engine.mock_calls[0] == mock.call(
"owner",
"repo",
123,
[
{"event_type": "pull_request", "data": {"payload": "whatever"}},
{"event_type": "comment", "data": {"payload": "foobar"}},
],
)
# Check stream still there and attempts recorded
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 1 == len(await redis.hgetall("attempts"))
assert {b"stream~owner": b"1"} == await redis.hgetall("attempts")
run_engine.side_effect = None
await p.consume("stream~owner")
assert len(run_engine.mock_calls) == 2
assert 0 == (await redis.zcard("streams"))
assert 0 == len(await redis.keys("stream~*"))
assert 0 == len(await redis.hgetall("attempts"))
assert 1 == len(logger.info.mock_calls)
assert 0 == len(logger.error.mock_calls)
assert logger.info.mock_calls[0].args == ("failed to process stream, retrying",)
@pytest.mark.asyncio
@mock.patch.object(worker, "LOG")
@mock.patch("mergify_engine.worker.run_engine")
async def test_stream_processor_retrying_stream_failure(run_engine, logger, redis):
logs.setup_logging()
response = mock.Mock()
response.json.return_value = {"message": "boom"}
response.status_code = 401
run_engine.side_effect = http.HTTPClientSideError(
message="foobar", request=response.request, response=response
)
await worker.push(
redis,
"owner",
"repo",
123,
"pull_request",
{"payload": "whatever"},
)
await worker.push(
redis,
"owner",
"repo",
123,
"comment",
{"payload": "foobar"},
)
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 2 == await redis.xlen("stream~owner")
assert 0 == len(await redis.hgetall("attempts"))
p = worker.StreamProcessor(redis)
await p.consume("stream~owner")
assert len(run_engine.mock_calls) == 1
assert run_engine.mock_calls[0] == mock.call(
"owner",
"repo",
123,
[
{"event_type": "pull_request", "data": {"payload": "whatever"}},
{"event_type": "comment", "data": {"payload": "foobar"}},
],
)
# Check stream still there and attempts recorded
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 1 == len(await redis.hgetall("attempts"))
assert {b"stream~owner": b"1"} == await redis.hgetall("attempts")
await p.consume("stream~owner")
assert len(run_engine.mock_calls) == 2
assert {b"stream~owner": b"2"} == await redis.hgetall("attempts")
await p.consume("stream~owner")
assert len(run_engine.mock_calls) == 3
# Still there
assert 3 == len(logger.info.mock_calls)
assert 0 == len(logger.error.mock_calls)
assert logger.info.mock_calls[0].args == ("failed to process stream, retrying",)
assert logger.info.mock_calls[1].args == ("failed to process stream, retrying",)
assert logger.info.mock_calls[2].args == ("failed to process stream, retrying",)
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 1 == len(await redis.hgetall("attempts"))
@pytest.mark.asyncio
@mock.patch("mergify_engine.worker.daiquiri.getLogger")
@mock.patch("mergify_engine.worker.run_engine")
async def test_stream_processor_pull_unexpected_error(run_engine, logger_class, redis):
logs.setup_logging()
logger = logger_class.return_value
run_engine.side_effect = Exception
await worker.push(
redis,
"owner",
"repo",
123,
"pull_request",
{"payload": "whatever"},
)
p = worker.StreamProcessor(redis)
await p.consume("stream~owner")
await p.consume("stream~owner")
# Exceptions have been logged, redis must be clean
assert len(run_engine.mock_calls) == 2
assert len(logger.error.mock_calls) == 2
assert logger.error.mock_calls[0].args == ("failed to process pull request",)
assert logger.error.mock_calls[1].args == ("failed to process pull request",)
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 0 == len(await redis.hgetall("attempts"))
@pytest.mark.asyncio
@mock.patch("mergify_engine.worker.run_engine")
async def test_stream_processor_date_scheduling(run_engine, redis, logger_checker):
# Don't process it before 2040
with freeze_time("2040-01-01"):
await worker.push(
redis,
"owner1",
"repo",
123,
"pull_request",
{"payload": "whatever"},
)
unwanted_owner_id = "owner1"
with freeze_time("2020-01-01"):
await worker.push(
redis,
"owner2",
"repo",
321,
"pull_request",
{"payload": "foobar"},
)
wanted_owner_id = "owner2"
assert 2 == (await redis.zcard("streams"))
assert 2 == len(await redis.keys("stream~*"))
assert 0 == len(await redis.hgetall("attempts"))
s = worker.StreamSelector(1, redis)
p = worker.StreamProcessor(redis)
received = []
def fake_engine(owner, repo, pull_number, sources):
received.append(owner)
run_engine.side_effect = fake_engine
with freeze_time("2020-01-14"):
async with s.next_stream() as stream_name:
assert stream_name is not None
await p.consume(stream_name)
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 0 == len(await redis.hgetall("attempts"))
assert received == [wanted_owner_id]
with freeze_time("2030-01-14"):
async with s.next_stream() as stream_name:
assert stream_name is None
assert 1 == (await redis.zcard("streams"))
assert 1 == len(await redis.keys("stream~*"))
assert 0 == len(await redis.hgetall("attempts"))
assert received == [wanted_owner_id]
# We are in 2041, we have something to do :)
with freeze_time("2041-01-14"):
async with s.next_stream() as stream_name:
assert stream_name is not None
await p.consume(stream_name)
assert 0 == (await redis.zcard("streams"))
assert 0 == len(await redis.keys("stream~*"))
assert 0 == len(await redis.hgetall("attempts"))
assert received == [wanted_owner_id, unwanted_owner_id]
@pytest.mark.asyncio
async def test_worker_debug_report(redis, logger_checker):
stream_names = []
for installation_id in range(8):
for pull_number in range(2):
for data in range(3):
owner = f"owner-{installation_id}"
repo = f"repo-{installation_id}"
stream_names.append(f"stream~owner-{installation_id}")
await worker.push(
redis,
owner,
repo,
pull_number,
"pull_request",
{"payload": data},
)
await worker.async_status()
@pytest.mark.asyncio
@mock.patch("mergify_engine.worker.run_engine")
async def test_stream_processor_retrying_after_read_error(run_engine, redis):
response = mock.Mock()
response.json.return_value = {"message": "boom"}
response.status_code = 503
run_engine.side_effect = httpx.ReadError(
"Server disconnected while attempting read",
request=mock.Mock(),
)
p = worker.StreamProcessor(redis)
with pytest.raises(worker.StreamRetry):
await p._run_engine_and_translate_exception_to_retries(
"stream-owner", "owner", "repo", 1234, []
)
| 29.416914
| 87
| 0.60231
|
21e54d48216fc15030f876f6cc6181428cec9170
| 4,370
|
py
|
Python
|
evalml/pipelines/components/estimators/estimator.py
|
ObinnaObeleagu/evalml
|
3b5bf62b08a5a5bc6485ba5387a08c32e1857473
|
[
"BSD-3-Clause"
] | 1
|
2021-07-28T14:20:35.000Z
|
2021-07-28T14:20:35.000Z
|
evalml/pipelines/components/estimators/estimator.py
|
ObinnaObeleagu/evalml
|
3b5bf62b08a5a5bc6485ba5387a08c32e1857473
|
[
"BSD-3-Clause"
] | null | null | null |
evalml/pipelines/components/estimators/estimator.py
|
ObinnaObeleagu/evalml
|
3b5bf62b08a5a5bc6485ba5387a08c32e1857473
|
[
"BSD-3-Clause"
] | null | null | null |
from abc import abstractmethod
from pandas.core.indexes import range
from evalml.exceptions import MethodPropertyNotFoundError
from evalml.model_family import ModelFamily
from evalml.pipelines.components import ComponentBase
from evalml.utils import infer_feature_types
class Estimator(ComponentBase):
"""A component that fits and predicts given data.
To implement a new Estimator, define your own class which is a subclass of Estimator, including
a name and a list of acceptable ranges for any parameters to be tuned during the automl search (hyperparameters).
Define an `__init__` method which sets up any necessary state and objects. Make sure your `__init__` only
uses standard keyword arguments and calls `super().__init__()` with a parameters dict. You may also override the
`fit`, `transform`, `fit_transform` and other methods in this class if appropriate.
To see some examples, check out the definitions of any Estimator component.
Arguments:
parameters (dict): Dictionary of parameters for the component. Defaults to None.
component_obj (obj): Third-party objects useful in component implementation. Defaults to None.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
# We can't use the inspect module to dynamically determine this because of issue 1582
predict_uses_y = False
model_family = ModelFamily.NONE
"""ModelFamily.NONE"""
modifies_features = True
modifies_target = False
@property
@classmethod
@abstractmethod
def supported_problem_types(cls):
"""Problem types this estimator supports"""
def __init__(self, parameters=None, component_obj=None, random_seed=0, **kwargs):
self.input_feature_names = None
super().__init__(
parameters=parameters,
component_obj=component_obj,
random_seed=random_seed,
**kwargs
)
def _manage_woodwork(self, X, y=None):
"""Function to convert the input and target data to Pandas data structures."""
if X is not None:
X = infer_feature_types(X)
if y is not None:
y = infer_feature_types(y)
return X, y
def fit(self, X, y=None):
X, y = self._manage_woodwork(X, y)
self.input_feature_names = list(X.columns)
self._component_obj.fit(X, y)
return self
def predict(self, X):
"""Make predictions using selected features.
Arguments:
X (pd.DataFrame, np.ndarray): Data of shape [n_samples, n_features]
Returns:
pd.Series: Predicted values
"""
try:
X = infer_feature_types(X)
if isinstance(X.columns, range.RangeIndex):
X.columns = [x for x in X.columns]
predictions = self._component_obj.predict(X)
except AttributeError:
raise MethodPropertyNotFoundError(
"Estimator requires a predict method or a component_obj that implements predict"
)
return infer_feature_types(predictions)
def predict_proba(self, X):
"""Make probability estimates for labels.
Arguments:
X (pd.DataFrame, or np.ndarray): Features
Returns:
pd.Series: Probability estimates
"""
try:
X = infer_feature_types(X)
pred_proba = self._component_obj.predict_proba(X)
except AttributeError:
raise MethodPropertyNotFoundError(
"Estimator requires a predict_proba method or a component_obj that implements predict_proba"
)
return infer_feature_types(pred_proba)
@property
def feature_importance(self):
"""Returns importance associated with each feature.
Returns:
np.ndarray: Importance associated with each feature
"""
try:
return self._component_obj.feature_importances_
except AttributeError:
raise MethodPropertyNotFoundError(
"Estimator requires a feature_importance property or a component_obj that implements feature_importances_"
)
def __eq__(self, other):
return (
super().__eq__(other)
and self.supported_problem_types == other.supported_problem_types
)
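# --- Illustrative sketch (not part of the original module) ---
# Minimal example of the subclassing pattern described in the Estimator docstring above:
# a name, tunable hyperparameter ranges, and an __init__ that forwards a parameters dict
# to super().__init__(). The concrete wrapper below is hypothetical.
from sklearn.tree import DecisionTreeClassifier

from evalml.model_family import ModelFamily
from evalml.problem_types import ProblemTypes


class ExampleTreeClassifier(Estimator):
    """Hypothetical estimator wrapping a scikit-learn decision tree."""

    name = "Example Tree Classifier"
    model_family = ModelFamily.DECISION_TREE
    supported_problem_types = [ProblemTypes.BINARY, ProblemTypes.MULTICLASS]
    hyperparameter_ranges = {"max_depth": [2, 10]}

    def __init__(self, max_depth=4, random_seed=0, **kwargs):
        # Only standard keyword arguments; everything tunable goes into the parameters dict.
        parameters = {"max_depth": max_depth}
        parameters.update(kwargs)
        component_obj = DecisionTreeClassifier(max_depth=max_depth, random_state=random_seed)
        super().__init__(parameters=parameters,
                         component_obj=component_obj,
                         random_seed=random_seed)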
| 35.819672
| 122
| 0.661556
|
b36849a5e073b156a4d691268d1790eebba70920
| 587
|
py
|
Python
|
scripts/configs/_base_/recog_datasets/aic_test.py
|
hoho303/AIC_2021
|
fcb626db91ed81bcb3f72eb7868106cfcb5d92fb
|
[
"Apache-2.0"
] | null | null | null |
scripts/configs/_base_/recog_datasets/aic_test.py
|
hoho303/AIC_2021
|
fcb626db91ed81bcb3f72eb7868106cfcb5d92fb
|
[
"Apache-2.0"
] | null | null | null |
scripts/configs/_base_/recog_datasets/aic_test.py
|
hoho303/AIC_2021
|
fcb626db91ed81bcb3f72eb7868106cfcb5d92fb
|
[
"Apache-2.0"
] | null | null | null |
test_prefix = '/content/drive/Shareddrives/Google Drive/AIClub/HCM_AI_CHALLENGE/Data/Vin_text/Reg/Crop/'
test_ann_file ='/content/drive/Shareddrives/Google Drive/AIClub/HCM_AI_CHALLENGE/Data/Vin_text/Reg/test.txt'
test = dict(
type='OCRDataset',
img_prefix=test_prefix,
ann_file=test_ann_file,
loader=dict(
type='HardDiskLoader',
repeat=1,
parser=dict(
type='LineStrParser',
keys=['filename', 'text'],
keys_idx=[0, 1],
separator='\t')),
pipeline=None,
test_mode=False)
test_list = [test]
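# --- Illustrative note (not part of the original config) ---
# A recognition config would typically pull this file in through MMOCR's _base_ mechanism,
# for example (paths and pipeline names below are hypothetical):
#   _base_ = ['../_base_/recog_datasets/aic_test.py']
#   data = dict(
#       test=dict(type='UniformConcatDataset',
#                 datasets={{_base_.test_list}},
#                 pipeline=test_pipeline))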
| 27.952381
| 108
| 0.647359
|
7436bf0d89fad1be0cc741a9c78c5a16fc7ae1b5
| 1,184
|
py
|
Python
|
catkin_ws/src/joystick_control/src/heading_to_usv.py
|
championway/asv_ros
|
4ded50c48077e1e63586cd32be2354633c163975
|
[
"MIT"
] | null | null | null |
catkin_ws/src/joystick_control/src/heading_to_usv.py
|
championway/asv_ros
|
4ded50c48077e1e63586cd32be2354633c163975
|
[
"MIT"
] | null | null | null |
catkin_ws/src/joystick_control/src/heading_to_usv.py
|
championway/asv_ros
|
4ded50c48077e1e63586cd32be2354633c163975
|
[
"MIT"
] | 1
|
2021-05-08T20:05:02.000Z
|
2021-05-08T20:05:02.000Z
|
#!/usr/bin/env python
import rospy
import math
from duckiepond.msg import Heading
from duckiepond_vehicle.msg import UsvDrive
class MotorCmd(object):
def __init__(self):
self.node_name = rospy.get_name()
rospy.loginfo("[%s] Initializing " %(self.node_name))
self.heading = None
self.pub_motor_cmd = rospy.Publisher("cmd_drive",UsvDrive,queue_size=1)
self.sub_heading = rospy.Subscriber("boat_heading",Heading,self.cbHeading,queue_size=1)
rospy.Timer(rospy.Duration(0.1), self.send_motor_cmd)
def send_motor_cmd(self, event):
if self.heading is None:
return
mcd_msg = UsvDrive()
mcd_msg.header.stamp = rospy.Time.now()
speed = self.heading.speed*math.sin(self.heading.phi)
difference = self.heading.speed*math.cos(self.heading.phi)
mcd_msg.left = max(min(speed - difference , 1),-1)
mcd_msg.right = max(min(speed + difference , 1),-1)
self.pub_motor_cmd.publish(mcd_msg)
def cbHeading(self,msg):
self.heading = msg
if __name__ == "__main__":
rospy.init_node("heading_to_usv")
commander = MotorCmd()
rospy.spin()
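# --- Illustrative note (not part of the original node) ---
# Worked example of the thruster mixing in send_motor_cmd():
#   heading.speed = 1.0, heading.phi = pi/2  -> speed = 1.0, difference = 0.0
#                                               left = right = 1.0 (drive straight ahead)
#   heading.speed = 1.0, heading.phi = 0.0   -> speed = 0.0, difference = 1.0
#                                               left = -1.0, right = 1.0 (rotate in place)
# Both outputs are clamped to the [-1, 1] range expected by UsvDrive.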
| 30.358974
| 95
| 0.661318
|
a622470f5659f8f0ef518cbf77eda6ed073431d4
| 3,255
|
py
|
Python
|
AutoUpgrade_Selenium/AutoUpgrade.py
|
yanqiaoyu/SetofAutomatedScripts
|
0001f2a438963c75f48a797ecbc1d72fa9bb43a4
|
[
"Apache-2.0"
] | null | null | null |
AutoUpgrade_Selenium/AutoUpgrade.py
|
yanqiaoyu/SetofAutomatedScripts
|
0001f2a438963c75f48a797ecbc1d72fa9bb43a4
|
[
"Apache-2.0"
] | null | null | null |
AutoUpgrade_Selenium/AutoUpgrade.py
|
yanqiaoyu/SetofAutomatedScripts
|
0001f2a438963c75f48a797ecbc1d72fa9bb43a4
|
[
"Apache-2.0"
] | null | null | null |
'''
@Author: YanQiaoYu
@Github: https://github.com/yanqiaoyu?tab=repositories
@Date: 2020-06-29 13:46:06
@LastEditors: YanQiaoYu
@LastEditTime: 2020-07-02 13:37:50
@FilePath: /SetofAutomatedScripts/AutoUpgrade_Selenium/AutoUpgrade.py
'''
from pynput import keyboard
from pynput.keyboard import Key, Controller
from selenium import webdriver
import time
from selenium.webdriver.support.select import Select
class Upgrade:
'''
@description: Initialization function
@param {type}
@return:
'''
def __init__(self):
self.driver = webdriver.Chrome()
self.URL = "http://tplinkdeco.net"
self.Time2Sleep = 3
self.DutName = "\"X60\""
self.keyboard = Controller()
self.reboottime = 100
'''
@description: Login function
@param {type}
@return:
'''
def Login(self):
self.driver.get("http://tplinkdeco.net")
time.sleep(self.Time2Sleep)
self.driver.find_element_by_xpath("//*[@id=\"local-login-pwd\"]/div[2]/div[1]/span[2]/input[1]").send_keys("1234567890")
time.sleep(self.Time2Sleep)
self.driver.find_element_by_id("local-login-button").click()
time.sleep(self.Time2Sleep)
'''
@description: Navigation steps after login, up to the firmware upload dialog
@param {type}
@return:
'''
def PrepareUpgrade(self):
self.driver.find_element_by_xpath("//*[@id=\"main-menu\"]/div/div/ul/li[2]/a/span[1]").click()
time.sleep(self.Time2Sleep)
self.driver.find_element_by_xpath("//*[@id=\"navigator\"]/div[1]/div/ul/li[2]/a/span[2]").click()
time.sleep(self.Time2Sleep)
self.driver.find_element_by_xpath("//*[@id=\"navigator\"]/div[1]/div/ul/li[2]/ul/li[1]/a/span[2]").click()
time.sleep(self.Time2Sleep)
self.driver.find_element_by_xpath("//*[@id=\"manual-upgrade-file\"]/div[2]/div[1]/div[1]/span[2]/label").click()
time.sleep(self.Time2Sleep)
'''
@description: Select the firmware file via pynput
@param {type}
@return:
'''
def ChooseFileAndUpgrade(self):
# Custom key presses to pick the upgrade firmware in the file dialog
self.keyboard.press(Key.right)
self.keyboard.release(Key.right)
time.sleep(self.Time2Sleep)
self.keyboard.press(Key.enter)
self.keyboard.release(Key.enter)
time.sleep(self.Time2Sleep)
'''
@description: After the firmware file is selected, operate the drop-down box and start the upgrade
@param {type}
@return:
'''
def ConfirmUpgrade(self):
a = self.driver.find_element_by_xpath("//*[@value=\"- Please Select -\"]")
a.click()
time.sleep(self.Time2Sleep)
self.driver.find_element_by_xpath("//*[@id=\"global-combobox-options\"]/div/div[3]/div/div/ul/li").click()
time.sleep(self.Time2Sleep)
self.driver.find_element_by_xpath("//*[@id=\"local-upgrade-btn\"]/div[2]/div[1]/a/span[2]").click()
time.sleep(self.Time2Sleep)
self.driver.find_element_by_id("firmware-upgrade-msg-btn-ok").click()
'''
@description: Wait for the device to reboot
@param {type}
@return:
'''
def WaitingReboot(self):
time.sleep(self.reboottime)
a=Upgrade()
for i in range(100):
try:
a.Login()
a.PrepareUpgrade()
a.ChooseFileAndUpgrade()
a.ConfirmUpgrade()
a.WaitingReboot()
except Exception:
pass
| 30.420561
| 128
| 0.619355
|
ee88dbdc0fa930fa4a5a4e31ac95483f754e4fd5
| 1,468
|
py
|
Python
|
scripts/cscap/tileflow/ingest_tileflow.py
|
akrherz/datateam
|
2efbaa24ff2e28115eeabce9193c3d3b152068d8
|
[
"MIT"
] | 5
|
2017-05-20T04:51:55.000Z
|
2022-03-07T18:55:27.000Z
|
scripts/cscap/tileflow/ingest_tileflow.py
|
isudatateam/datateam
|
eb8e1dad6c05cb1b236689862fe87c56b25ea6fc
|
[
"MIT"
] | 275
|
2017-03-09T20:31:30.000Z
|
2022-03-30T22:43:47.000Z
|
scripts/cscap/tileflow/ingest_tileflow.py
|
isudatateam/datateam
|
eb8e1dad6c05cb1b236689862fe87c56b25ea6fc
|
[
"MIT"
] | 3
|
2020-06-01T15:03:06.000Z
|
2021-02-01T13:46:58.000Z
|
"""Tileflow ingest"""
import sys
import datetime
import pytz
import pandas as pd
import psycopg2
import numpy as np
from pyiem.cscap_utils import get_config, get_spreadsheet_client, Spreadsheet
CENTRAL_TIME = [
"ISUAG",
"GILMORE",
"SERF",
"CLAY_C",
"CLAY_R",
"MUDS2",
"MUDS3_OLD",
"MUDS4",
"SERF_SD",
"SERF_IA",
"STORY",
"UBWC",
]
def gio_process(filename):
"""This is a manually generated file by gio"""
pgconn = psycopg2.connect(database="sustainablecorn")
cursor = pgconn.cursor()
sql = """
INSERT into tileflow_data(uniqueid, plotid, valid,
discharge_mm, discharge_mm_qc) VALUES (%s, %s, %s, %s, %s)
"""
for i, line in enumerate(open(filename)):
if i == 0:
continue
(uniqueid, plotid, date, localtime, flow) = line.strip().split(",")
if localtime == "":
localtime = "00:00"
if flow == "":
flow = None
ts = datetime.datetime.strptime(
"%s %s" % (date, localtime), "%Y-%m-%d %H:%M"
)
offset = 6 if uniqueid in CENTRAL_TIME else 5
ts = ts + datetime.timedelta(hours=offset)
ts = ts.replace(tzinfo=pytz.utc)
cursor.execute(sql, (uniqueid, plotid, ts, flow, flow))
cursor.close()
pgconn.commit()
pgconn.close()
def main(argv):
"""Go Main"""
fn = argv[1]
gio_process(fn)
if __name__ == "__main__":
main(sys.argv)
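# --- Illustrative note (not part of the original script) ---
# Expected input: a CSV with a header row and lines of the form
#   uniqueid,plotid,YYYY-MM-DD,HH:MM,flow
# Worked example of the timestamp handling in gio_process():
#   "SERF,P1,2015-06-01,13:30,0.42" -> SERF is listed in CENTRAL_TIME, so 6 hours are added
#                                      and the row is stored as 2015-06-01 19:30 UTC.
#   Sites not listed in CENTRAL_TIME get a 5 hour offset instead.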
| 22.9375
| 77
| 0.576975
|
49a6fe9c44ccb6f48e3d5322205b5c29141bf082
| 309
|
py
|
Python
|
netdisco/discoverables/lg_smart_device.py
|
jjlawren/netdisco
|
ffc3cd092bff359b1c1fc1ed51940624b3c8076b
|
[
"Apache-2.0"
] | 234
|
2016-04-18T23:47:08.000Z
|
2021-04-15T09:18:11.000Z
|
env/lib/python3.7/site-packages/netdisco/discoverables/lg_smart_device.py
|
seanmitch/UPnP
|
f3cb1ef62657f166971c1c470ce5dfd58bdeeac9
|
[
"Unlicense"
] | 175
|
2016-04-13T11:25:29.000Z
|
2019-05-27T03:20:00.000Z
|
env/lib/python3.7/site-packages/netdisco/discoverables/lg_smart_device.py
|
seanmitch/UPnP
|
f3cb1ef62657f166971c1c470ce5dfd58bdeeac9
|
[
"Unlicense"
] | 185
|
2016-04-13T11:32:11.000Z
|
2021-03-31T14:26:39.000Z
|
"""Discover LG smart devices."""
from . import MDNSDiscoverable
# pylint: disable=too-few-public-methods
class Discoverable(MDNSDiscoverable):
"""Add support for discovering LG smart devices."""
def __init__(self, nd):
super(Discoverable, self).__init__(nd, '_lg-smart-device._tcp.local.')
| 28.090909
| 78
| 0.718447
|
76889a7ba77dff97aa0e87cbcc10c6ff4cb4c4b6
| 273
|
py
|
Python
|
setup.py
|
igred8/laser-rb
|
1c3d1acc821a5bdaebba8fe63f1013430a1fe80f
|
[
"MIT"
] | null | null | null |
setup.py
|
igred8/laser-rb
|
1c3d1acc821a5bdaebba8fe63f1013430a1fe80f
|
[
"MIT"
] | null | null | null |
setup.py
|
igred8/laser-rb
|
1c3d1acc821a5bdaebba8fe63f1013430a1fe80f
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Simulations, calculations, reports, and misc. for the UV laser transport to the photo cathode.',
author='igad',
license='MIT',
)
| 24.818182
| 113
| 0.688645
|
56522c908b53f9d0b9195dffa7c67dd2a6581e75
| 1,004
|
py
|
Python
|
UniShared_python/website/forms.py
|
UniShared/unishared
|
0abc36be9e4262a4928945f70ec030e46e05149b
|
[
"MIT"
] | null | null | null |
UniShared_python/website/forms.py
|
UniShared/unishared
|
0abc36be9e4262a4928945f70ec030e46e05149b
|
[
"MIT"
] | null | null | null |
UniShared_python/website/forms.py
|
UniShared/unishared
|
0abc36be9e4262a4928945f70ec030e46e05149b
|
[
"MIT"
] | 2
|
2019-03-03T17:34:48.000Z
|
2019-04-23T17:34:14.000Z
|
from django.forms import ModelForm, Form, CharField, ValidationError
from website.models import NoteTakingBuddy, Training
__author__ = 'arnaud'
class DocumentTitleForm(Form):
title = CharField(max_length=256)
def clean_title(self):
title = self.cleaned_data['title']
result = Training.objects.filter(title__icontains=title)
if not result.exists():
raise ValidationError('This document doesn\'t exist')
class NoteTakingBuddyForm(ModelForm):
class Meta:
model = NoteTakingBuddy
exclude = ('hub', 'user', 'score',)
def __init__(self, *args, **kwargs):
self._hub = kwargs.pop('hub', None)
self._user = kwargs.pop('user', None)
super(NoteTakingBuddyForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
inst = self.instance
inst.hub = self._hub
inst.user = self._user
inst.score = inst.get_score()
if commit:
inst.save()
return inst
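# --- Illustrative sketch (not part of the original module) ---
# Hypothetical view showing how the extra `hub`/`user` kwargs consumed by
# NoteTakingBuddyForm.__init__ are supplied, and how the overridden save() is used.
def example_signup_view(request, hub):
    form = NoteTakingBuddyForm(request.POST or None, hub=hub, user=request.user)
    if form.is_valid():
        form.save()  # hub, user and score are filled in by the overridden save()
    return form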
| 30.424242
| 68
| 0.64243
|
994bd523542806d6a443bbb56fb9bf904539bca7
| 13,071
|
py
|
Python
|
cinder/volume/drivers/dell_emc/sc/storagecenter_iscsi.py
|
liangintel/stx-cinder
|
f4c43797a3f8c0caebfd8fb67244c084d26d9741
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/dell_emc/sc/storagecenter_iscsi.py
|
liangintel/stx-cinder
|
f4c43797a3f8c0caebfd8fb67244c084d26d9741
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/dell_emc/sc/storagecenter_iscsi.py
|
liangintel/stx-cinder
|
f4c43797a3f8c0caebfd8fb67244c084d26d9741
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015-2017 Dell Inc, or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for Dell Storage Center."""
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.dell_emc.sc import storagecenter_common
LOG = logging.getLogger(__name__)
@interface.volumedriver
class SCISCSIDriver(storagecenter_common.SCCommonDriver,
driver.ISCSIDriver):
"""Implements commands for Dell Storage Center ISCSI management.
To enable the driver add the following line to the cinder configuration:
volume_driver=cinder.volume.drivers.dell_emc.sc.\
dell_storagecenter_iscsi.SCISCSIDriver
Version history:
.. code-block:: none
1.0.0 - Initial driver
1.1.0 - Added extra spec support for Storage Profile selection
1.2.0 - Added consistency group support.
2.0.0 - Switched to inheriting functional objects rather than volume
driver.
2.1.0 - Added support for ManageableVD.
2.2.0 - Driver retype support for switching volume's Storage Profile.
Added API 2.2 support.
2.3.0 - Added Legacy Port Mode Support
2.3.1 - Updated error handling.
2.4.0 - Added Replication V2 support.
2.4.1 - Updated Replication support to V2.1.
2.5.0 - ManageableSnapshotsVD implemented.
3.0.0 - ProviderID utilized.
3.1.0 - Failback Supported.
3.2.0 - Live Volume support.
3.3.0 - Support for a secondary DSM.
3.4.0 - Support for excluding a domain.
3.5.0 - Support for AFO.
3.6.0 - Server type support.
3.7.0 - Support for Data Reduction, Group QOS and Volume QOS.
4.0.0 - Driver moved to dell_emc.
"""
VERSION = '4.0.0'
CI_WIKI_NAME = "Dell_Storage_CI"
def __init__(self, *args, **kwargs):
super(SCISCSIDriver, self).__init__(*args, **kwargs)
self.backend_name = (
self.configuration.safe_get('volume_backend_name') or 'Dell-iSCSI')
def initialize_connection(self, volume, connector):
# Initialize_connection will find or create a server identified by the
# connector on the Dell backend. It will then map the volume to it
# and return the properties as follows..
# {'driver_volume_type': 'iscsi',
# data = {'target_discovered': False,
# 'target_iqn': preferred iqn,
# 'target_iqns': all iqns,
# 'target_portal': preferred portal,
# 'target_portals': all portals,
# 'target_lun': preferred lun,
# 'target_luns': all luns,
# }
# We use the volume id as the volume name since it is a
# known unique name.
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
islivevol = self._is_live_vol(volume)
initiator_name = connector.get('initiator')
multipath = connector.get('multipath', False)
LOG.info('initialize_connection: %(vol)s:%(pid)s:'
'%(intr)s. Multipath is %(mp)r',
{'vol': volume_name,
'pid': provider_id,
'intr': initiator_name,
'mp': multipath})
with self._client.open_connection() as api:
try:
# Find the volume on the storage center. Note that if this
# is live volume and we are swapped this will be the back
# half of the live volume.
scvolume = api.find_volume(volume_name, provider_id, islivevol)
if scvolume:
# Get the SSN it is on.
ssn = scvolume['instanceId'].split('.')[0]
# Find our server.
scserver = api.find_server(initiator_name, ssn)
# No? Create it.
if scserver is None:
scserver = api.create_server(
[initiator_name],
self.configuration.dell_server_os, ssn)
# if we have a server and a volume lets bring them
# together.
if scserver is not None:
mapping = api.map_volume(scvolume, scserver)
if mapping is not None:
# Since we just mapped our volume we had best
# update our sc volume object.
scvolume = api.get_volume(scvolume['instanceId'])
# Our return.
iscsiprops = {}
# Three cases that should all be satisfied with the
# same return of Target_Portal and Target_Portals.
# 1. Nova is calling us so we need to return the
# Target_Portal stuff. It should ignore the
# Target_Portals stuff.
# 2. OS brick is calling us in multipath mode so we
# want to return Target_Portals. It will ignore
# the Target_Portal stuff.
# 3. OS brick is calling us in single path mode so
# we want to return Target_Portal and
# Target_Portals as alternates.
iscsiprops = api.find_iscsi_properties(scvolume)
# If this is a live volume we need to map up our
# secondary volume. Note that if we have failed
# over we do not wish to do this.
if islivevol:
sclivevolume = api.get_live_volume(provider_id)
# Only map if we are not failed over.
if (sclivevolume and not
api.is_failed_over(provider_id,
sclivevolume)):
secondaryprops = self.initialize_secondary(
api, sclivevolume, initiator_name)
# Combine with iscsiprops
iscsiprops['target_iqns'] += (
secondaryprops['target_iqns'])
iscsiprops['target_portals'] += (
secondaryprops['target_portals'])
iscsiprops['target_luns'] += (
secondaryprops['target_luns'])
# Return our iscsi properties.
iscsiprops['discard'] = True
return {'driver_volume_type': 'iscsi',
'data': iscsiprops}
# Re-raise any backend exception.
except exception.VolumeBackendAPIException:
with excutils.save_and_reraise_exception():
LOG.error('Failed to initialize connection')
# If there is a data structure issue then detail the exception
# and bail with a Backend Exception.
except Exception as error:
LOG.error(error)
raise exception.VolumeBackendAPIException(error)
# We get here because our mapping is none or we have no valid iqn to
# return so blow up.
raise exception.VolumeBackendAPIException(
_('Unable to map volume'))
def initialize_secondary(self, api, sclivevolume, initiatorname):
"""Initialize the secondary connection of a live volume pair.
:param api: Dell SC api.
:param sclivevolume: Dell SC live volume object.
:param initiatorname: Cinder iscsi initiator from the connector.
:return: ISCSI properties.
"""
# Find our server.
secondary = api.find_server(initiatorname,
sclivevolume['secondaryScSerialNumber'])
# No? Create it.
if secondary is None:
secondary = api.create_server(
[initiatorname], self.configuration.dell_server_os,
sclivevolume['secondaryScSerialNumber'])
if secondary:
if api.map_secondary_volume(sclivevolume, secondary):
# Get our volume and get our properties.
secondaryvol = api.get_volume(
sclivevolume['secondaryVolume']['instanceId'])
if secondaryvol:
return api.find_iscsi_properties(secondaryvol)
# Dummy return on failure.
data = {'target_discovered': False,
'target_iqn': None,
'target_iqns': [],
'target_portal': None,
'target_portals': [],
'target_lun': None,
'target_luns': [],
}
LOG.warning('Unable to map live volume secondary volume'
' %(vol)s to secondary server initiator: %(init)r',
{'vol': sclivevolume['secondaryVolume']['instanceName'],
'init': initiatorname})
return data
def terminate_connection(self, volume, connector, force=False, **kwargs):
# Grab some quick info.
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
initiator_name = None if not connector else connector.get('initiator')
LOG.debug('Terminate connection: %(vol)s:%(initiator)s',
{'vol': volume_name,
'initiator': initiator_name})
with self._client.open_connection() as api:
try:
# Find the volume on the storage center. Note that if this
# is live volume and we are swapped this will be the back
# half of the live volume.
islivevol = self._is_live_vol(volume)
scvolume = api.find_volume(volume_name, provider_id, islivevol)
if scvolume:
# Get the SSN it is on.
ssn = scvolume['instanceId'].split('.')[0]
# Unmap our secondary if not failed over..
if islivevol:
sclivevolume = api.get_live_volume(provider_id)
if (sclivevolume and not
api.is_failed_over(provider_id,
sclivevolume)):
self.terminate_secondary(api, sclivevolume,
initiator_name)
# Find our server.
scserver = (None if not initiator_name else
api.find_server(initiator_name, ssn))
# If we have a server and a volume lets pull them apart.
if ((scserver and
api.unmap_volume(scvolume, scserver) is True) or
(not scserver and api.unmap_all(scvolume))):
LOG.debug('Connection terminated')
return
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Failed to terminate connection '
'%(initiator)s %(vol)s',
{'initiator': initiator_name,
'vol': volume_name})
raise exception.VolumeBackendAPIException(
_('Terminate connection failed'))
def terminate_secondary(self, api, sclivevolume, initiatorname):
secondaryvol = api.get_volume(
sclivevolume['secondaryVolume']['instanceId'])
if secondaryvol:
if initiatorname:
# Find our server.
secondary = api.find_server(
initiatorname, sclivevolume['secondaryScSerialNumber'])
return api.unmap_volume(secondaryvol, secondary)
else:
return api.unmap_all(secondaryvol)
else:
LOG.debug('terminate_secondary: secondary volume not found.')
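# --- Illustrative note (not part of the original driver) ---
# Example of the connection info shape initialize_connection() returns (values are made up):
#   {'driver_volume_type': 'iscsi',
#    'data': {'target_discovered': False,
#             'target_iqn': 'iqn.2002-03.com.compellent:example',
#             'target_iqns': [...],
#             'target_portal': '10.0.0.10:3260',
#             'target_portals': [...],
#             'target_lun': 1,
#             'target_luns': [...],
#             'discard': True}}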
| 46.187279
| 79
| 0.536761
|
818f3eedd771f0c558e9bda6ed59518c44d532dd
| 13,049
|
py
|
Python
|
src/train.py
|
andi611/CS-Tacotron
|
8a0d61899e95802b18024cdf0710ea405bc08878
|
[
"MIT"
] | 20
|
2019-03-07T13:19:37.000Z
|
2022-03-27T22:00:55.000Z
|
src/train.py
|
andi611/CS-Tacotron
|
8a0d61899e95802b18024cdf0710ea405bc08878
|
[
"MIT"
] | 5
|
2020-01-28T22:20:22.000Z
|
2022-02-09T23:36:55.000Z
|
src/train.py
|
samirsahoo007/CS-Tacotron-Pytorch
|
fe63ef5ddcfc83345cc9c3d128b72a1c4de12682
|
[
"MIT"
] | 8
|
2019-12-18T07:12:40.000Z
|
2021-12-21T08:54:38.000Z
|
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ train.py ]
# Synopsis [ Training script for Tacotron speech synthesis model ]
# Author [ Ting-Wei Liu (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
"""
Usage: train.py [options]
Options:
--checkpoint_dir <dir> Directory where to save model checkpoints [default: checkpoints].
--checkpoint_path <name> Restore model from checkpoint path if given.
--data_root <dir> Directory contains preprocessed features.
--meta_text <name> Name of the model-ready training transcript.
--summary_comment <str> Comment for log summary writer.
-h, --help Show this help message and exit
"""
###############
# IMPORTATION #
###############
import os
import sys
import time
#-----------------------#
import numpy as np
import librosa.display
#---------------------#
from utils import audio
from utils.plot import plot_alignment, plot_spectrogram
from utils.text import text_to_sequence, symbols
#----------------------------------------------#
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from torch.utils import data
#----------------------------------------#
from model.tacotron import Tacotron
from config import config, get_training_args
#------------------------------------------#
from nnmnkwii.datasets import FileSourceDataset, FileDataSource
from tensorboardX import SummaryWriter
####################
# GLOBAL VARIABLES #
####################
global_step = 0
global_epoch = 0
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
cudnn.benchmark = False
DATA_ROOT = None
META_TEXT = None
def _pad(seq, max_len):
return np.pad(seq, (0, max_len - len(seq)),
mode='constant', constant_values=0)
def _pad_2d(x, max_len):
x = np.pad(x, [(0, max_len - len(x)), (0, 0)],
mode="constant", constant_values=0)
return x
####################
# TEXT DATA SOURCE #
####################
class TextDataSource(FileDataSource):
def __init__(self):
pass #self._cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
def collect_files(self):
meta = os.path.join(DATA_ROOT, META_TEXT)
with open(meta, 'r', encoding='utf-8') as f:
lines = f.readlines()
lines = list(map(lambda l: l.split("|")[-1][:-1], lines))
return lines
def collect_features(self, text):
return np.asarray(text_to_sequence(text), dtype=np.int32)
###################
# NPY DATA SOURCE #
###################
class _NPYDataSource(FileDataSource):
def __init__(self, col):
self.col = col
def collect_files(self):
meta = os.path.join(DATA_ROOT, META_TEXT)
with open(meta, 'r', encoding='utf-8') as f:
lines = f.readlines()
lines = list(map(lambda l: l.split("|")[self.col], lines))
paths = list(map(lambda f: os.path.join(DATA_ROOT, f), lines))
return paths
def collect_features(self, path):
return np.load(path)
########################
# MEL SPEC DATA SOURCE #
########################
class MelSpecDataSource(_NPYDataSource):
def __init__(self):
super(MelSpecDataSource, self).__init__(1)
###########################
# LINEAR SPEC DATA SOURCE #
###########################
class LinearSpecDataSource(_NPYDataSource):
def __init__(self):
super(LinearSpecDataSource, self).__init__(0)
#######################
# PYTORCH DATA SOURCE #
#######################
class PyTorchDataset(object):
def __init__(self, X, Mel, Y):
self.X = X
self.Mel = Mel
self.Y = Y
def __getitem__(self, idx):
return self.X[idx], self.Mel[idx], self.Y[idx]
def __len__(self):
return len(self.X)
##############
# COLLATE FN #
##############
"""
Create batch
"""
def collate_fn(batch):
r = config.outputs_per_step
input_lengths = [len(x[0]) for x in batch]
max_input_len = np.max(input_lengths)
max_target_len = np.max([len(x[1]) for x in batch]) + 1 # Add single zeros frame at least, so plus 1
if max_target_len % r != 0:
max_target_len += r - max_target_len % r
assert max_target_len % r == 0
a = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int)
x_batch = torch.LongTensor(a)
input_lengths = torch.LongTensor(input_lengths)
b = np.array([_pad_2d(x[1], max_target_len) for x in batch], dtype=np.float32)
mel_batch = torch.FloatTensor(b)
c = np.array([_pad_2d(x[2], max_target_len) for x in batch], dtype=np.float32)
y_batch = torch.FloatTensor(c)
return x_batch, input_lengths, mel_batch, y_batch
#######################
# LEARNING RATE DECAY #
#######################
def _learning_rate_decay(init_lr, global_step):
warmup_steps = 6000.0
step = global_step + 1.
lr = init_lr * warmup_steps**0.5 * np.minimum(step * warmup_steps**-1.5, step**-0.5)
return lr
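# --- Illustrative note (not part of the original script) ---
# Worked example of the Noam-style schedule above with init_lr = 0.002:
#   step << 6000 : lr grows roughly linearly (the `step * warmup_steps**-1.5` branch is smaller)
#   step == 6000 : both branches equal 6000**-0.5, so lr peaks at exactly init_lr
#   step >> 6000 : lr decays proportionally to step**-0.5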
###############
# SAVE STATES #
###############
def save_states(global_step, mel_outputs, linear_outputs, attn, y,
input_lengths, checkpoint_dir=None):
idx = min(1, len(input_lengths) - 1) # idx = np.random.randint(0, len(input_lengths))
input_length = input_lengths[idx]
# Alignment
path = os.path.join(checkpoint_dir, "step{}_alignment.png".format(
global_step))
alignment = attn[idx].cpu().data.numpy() # alignment = attn[idx].cpu().data.numpy()[:, :input_length]
plot_alignment(alignment.T, path, info="tacotron, step={}".format(global_step))
# Predicted spectrogram
path = os.path.join(checkpoint_dir, "step{}_predicted_spectrogram.png".format(
global_step))
linear_output = linear_outputs[idx].cpu().data.numpy()
plot_spectrogram(linear_output, path)
# Predicted audio signal
signal = audio.inv_spectrogram(linear_output.T)
path = os.path.join(checkpoint_dir, "step{}_predicted.wav".format(
global_step))
audio.save_wav(signal, path)
# Target spectrogram
path = os.path.join(checkpoint_dir, "step{}_target_spectrogram.png".format(
global_step))
linear_output = y[idx].cpu().data.numpy()
plot_spectrogram(linear_output, path)
###################
# SAVE CHECKPOINT #
###################
def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch):
checkpoint_path = os.path.join(checkpoint_dir, "checkpoint_step{}.pth".format(global_step))
torch.save({"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
"global_step": step,
"global_epoch": epoch,},
checkpoint_path)
#################
# TACOTRON STEP #
#################
"""
One step of training: Train a single batch of data on Tacotron
"""
def tacotron_step(model, optimizer, criterion,
x, input_lengths, mel, y,
init_lr, sample_rate, clip_thresh,
running_loss, data_len, global_step):
#---decay learning rate---#
current_lr = _learning_rate_decay(init_lr, global_step)
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr
optimizer.zero_grad()
#---sort by length---#
sorted_lengths, indices = torch.sort(input_lengths.view(-1), dim=0, descending=True)
sorted_lengths = sorted_lengths.long().numpy()
#---feed data---#
x, mel, y = Variable(x[indices]), Variable(mel[indices]), Variable(y[indices])
if USE_CUDA:
x, mel, y = x.cuda(), mel.cuda(), y.cuda()
mel_outputs, linear_outputs, attn = model(x, mel, input_lengths=sorted_lengths)
#---Loss---#
mel_loss = criterion(mel_outputs, mel)
n_priority_freq = int(3000 / (sample_rate * 0.5) * model.linear_dim)
linear_loss = 0.5 * criterion(linear_outputs, y) + 0.5 * criterion(linear_outputs[:, :, :n_priority_freq], y[:, :, :n_priority_freq])
loss = mel_loss + linear_loss
#---log loss---#
total_L = loss.item()
running_loss += loss.item()
avg_L = running_loss / (data_len)
mel_L = mel_loss.item()
linear_L = linear_loss.item()
#---update model---#
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), clip_thresh)
optimizer.step()
#---wrap up returns---#
Ms = { 'mel_outputs' : mel_outputs,
'linear_outputs' : linear_outputs,
'attn' : attn,
'sorted_lengths' : sorted_lengths,
'grad_norm' : grad_norm,
'current_lr' : current_lr }
Ls = { 'total_L': total_L,
'avg_L' : avg_L,
'mel_L' : mel_L,
'linear_L' : linear_L }
return model, optimizer, Ms, Ls
#########
# TRAIN #
#########
def train(model,
optimizer,
data_loader,
summary_comment,
init_lr=0.002,
checkpoint_dir=None,
checkpoint_interval=None,
nepochs=None,
clip_thresh=1.0,
sample_rate=20000):
if USE_CUDA:
model = model.cuda()
model.train()
writer = SummaryWriter() if summary_comment == None else SummaryWriter(summary_comment)
global global_step, global_epoch
criterion = nn.L1Loss()
while global_epoch < nepochs:
start = time.time()
running_loss = 0.
for x, input_lengths, mel, y in data_loader:
model, optimizer, Ms, Rs = tacotron_step(model, optimizer, criterion,
x, input_lengths, mel, y,
init_lr, sample_rate, clip_thresh,
running_loss, len(data_loader), global_step)
mel_outputs = Ms['mel_outputs']
linear_outputs = Ms['linear_outputs']
attn = Ms['attn']
sorted_lengths = Ms['sorted_lengths']
grad_norm = Ms['grad_norm']
current_lr = Ms['current_lr']
total_L = Rs['total_L']
avg_L = Rs['avg_L']
mel_L = Rs['mel_L']
linear_L = Rs['linear_L']
duration = time.time() - start
if global_step > 0 and global_step % checkpoint_interval == 0:
save_states(global_step, mel_outputs, linear_outputs, attn, y, sorted_lengths, checkpoint_dir)
save_checkpoint(model, optimizer, global_step, checkpoint_dir, global_epoch)
log = '[{}] total_L: {:.3f}, avg_L: {:.3f}, mel_L: {:.3f}, mag_L: {:.3f}, grad_norm: {:.3f}, lr: {:.5f}, t: {:.2f}s, saved: T'.format(global_step, total_L, avg_L, mel_L, linear_L, grad_norm, current_lr, duration)
print(log)
elif global_step % 5 == 0:
log = '[{}] total_L: {:.3f}, avg_L: {:.3f}, mel_L: {:.3f}, mag_L: {:.3f}, grad_norm: {:.3f}, lr: {:.5f}, t: {:.2f}s, saved: F'.format(global_step, total_L, avg_L, mel_L, linear_L, grad_norm, current_lr, duration)
print(log, end='\r')
# Logs
writer.add_scalar('total_loss', total_L, global_step)
writer.add_scalar('averaged_loss', avg_L, global_step)
writer.add_scalar('mel_loss', mel_L, global_step)
writer.add_scalar('linear_loss', linear_L, global_step)
writer.add_scalar('grad_norm', grad_norm, global_step)
writer.add_scalar('learning_rate', current_lr, global_step)
global_step += 1
start = time.time()
global_epoch += 1
#######################
# INITIALIZE TRAINING #
#######################
"""
Setup and prepare for Tacotron training.
"""
def initialize_training(checkpoint_path):
# Input dataset definitions
X = FileSourceDataset(TextDataSource())
Mel = FileSourceDataset(MelSpecDataSource())
Y = FileSourceDataset(LinearSpecDataSource())
# Dataset and Dataloader setup
dataset = PyTorchDataset(X, Mel, Y)
data_loader = data.DataLoader(dataset,
batch_size=config.batch_size,
num_workers=config.num_workers,
shuffle=True,
collate_fn=collate_fn,
pin_memory=config.pin_memory)
# Model
model = Tacotron(n_vocab=len(symbols),
embedding_dim=config.embedding_dim,
mel_dim=config.num_mels,
linear_dim=config.num_freq,
r=config.outputs_per_step,
padding_idx=config.padding_idx,
use_memory_mask=config.use_memory_mask)
optimizer = optim.Adam(model.parameters(),
lr=config.initial_learning_rate,
betas=(config.adam_beta1, config.adam_beta2),
weight_decay=config.weight_decay)
# Load checkpoint
if checkpoint_path != None:
print("Load checkpoint from: {}".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
try:
global_step = checkpoint["global_step"]
global_epoch = checkpoint["global_epoch"]
except:
print('Warning: global step and global epoch unable to restore!')
sys.exit(0)
return model, optimizer, data_loader
########
# MAIN #
########
def main():
args = get_training_args()
global DATA_ROOT, META_TEXT
if args.data_root != None:
DATA_ROOT = args.data_root
if args.meta_text != None:
META_TEXT = args.meta_text
checkpoint_dir = args.checkpoint_dir
checkpoint_path = args.checkpoint_path
os.makedirs(checkpoint_dir, exist_ok=True)
model, optimizer, data_loader = initialize_training(checkpoint_path)
# Train!
try:
train(model, optimizer, data_loader, args.summary_comment,
init_lr=config.initial_learning_rate,
checkpoint_dir=checkpoint_dir,
checkpoint_interval=config.checkpoint_interval,
nepochs=config.nepochs,
clip_thresh=config.clip_thresh,
sample_rate=config.sample_rate)
except KeyboardInterrupt:
print()
pass
print("Finished")
sys.exit(0)
if __name__ == "__main__":
main()
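# --- Illustrative note (not part of the original script) ---
# Example invocation built from the options documented at the top of this file
# (paths are placeholders):
#   python train.py --data_root ./data/processed \
#                   --meta_text train_meta.txt \
#                   --checkpoint_dir checkpoints \
#                   --summary_comment cs_tacotron_run1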
| 28.869469
| 216
| 0.653153
|
63b8aabc6ee3156e9e90c5c682bcdba5bd57abd0
| 1,592
|
py
|
Python
|
src/util/__init__.py
|
Mathtin/overlord
|
a800f0903e99ffe989efb778937b89d2707ee510
|
[
"MIT"
] | null | null | null |
src/util/__init__.py
|
Mathtin/overlord
|
a800f0903e99ffe989efb778937b89d2707ee510
|
[
"MIT"
] | null | null | null |
src/util/__init__.py
|
Mathtin/overlord
|
a800f0903e99ffe989efb778937b89d2707ee510
|
[
"MIT"
] | 1
|
2021-02-15T23:12:45.000Z
|
2021-02-15T23:12:45.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020-present Daniel [Mathtin] Shiko <wdaniil@mail.ru>
Project: Overlord discord bot
Contributors: Danila [DeadBlasoul] Popov <dead.blasoul@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = "Mathtin"
from .config import ConfigView, ConfigParser, ConfigManager
from .exceptions import InvalidConfigException, NotCoroutineException
from .resources import STRINGS as R
from .common import get_module_element, dict_fancy_table, pretty_days, pretty_seconds, parse_control_message, \
limit_traceback, FORMATTERS
| 43.027027
| 111
| 0.799623
|
549b38a358011cb46f39af64b34b4863070dcdca
| 12,010
|
py
|
Python
|
objectModel/Python/cdm/objectmodel/projections/cdm_projection.py
|
CBA-Consult/CDM
|
892bceac7a15167c85342cc1c61d7ecdf5f1b78d
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
objectModel/Python/cdm/objectmodel/projections/cdm_projection.py
|
CBA-Consult/CDM
|
892bceac7a15167c85342cc1c61d7ecdf5f1b78d
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
objectModel/Python/cdm/objectmodel/projections/cdm_projection.py
|
CBA-Consult/CDM
|
892bceac7a15167c85342cc1c61d7ecdf5f1b78d
|
[
"CC-BY-4.0",
"MIT"
] | 1
|
2021-09-24T16:51:04.000Z
|
2021-09-24T16:51:04.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from typing import Optional, TYPE_CHECKING
from cdm.enums import CdmObjectType, CdmAttributeContextType
from cdm.objectmodel import CdmObjectDefinition, CdmOperationCollection, CdmAttributeContext
from cdm.resolvedmodel import ResolvedAttributeSet
from cdm.resolvedmodel.expression_parser.expression_tree import ExpressionTree
from cdm.resolvedmodel.expression_parser.input_values import InputValues
from cdm.resolvedmodel.expression_parser.node import Node
from cdm.resolvedmodel.projections.condition_expression import ConditionExpression
from cdm.resolvedmodel.projections.projection_attribute_state_set import ProjectionAttributeStateSet
from cdm.resolvedmodel.projections.projection_context import ProjectionContext
from cdm.resolvedmodel.projections.projection_resolution_common_util import ProjectionResolutionCommonUtil
from cdm.utilities import Errors, logger, AttributeContextParameters
if TYPE_CHECKING:
from cdm.objectmodel import CdmCorpusContext, CdmEntityReference
from cdm.utilities import ResolveOptions, VisitCallback
from cdm.resolvedmodel.projections.projection_directive import ProjectionDirective
class CdmProjection(CdmObjectDefinition):
"""Class for Projection"""
def __init__(self, ctx: 'CdmCorpusContext') -> None:
super().__init__(ctx)
# Property of a projection that holds the condition expression string
self.condition = None # type: str
# Property of a projection that holds a collection of operations
self.operations = CdmOperationCollection(ctx, self) # type: CdmOperationCollection
# Property of a projection that holds the source of the operation
self.source = None # type: CdmEntityReference
# --- internal ---
# Condition expression tree that is built out of a condition expression string
self._condition_expression_tree_root = None # type: Node
self._TAG = CdmProjection.__name__
def copy(self, res_opt: Optional['ResolveOptions'] = None, host: Optional['CdmProjection'] = None) -> 'CdmProjection':
logger.error(self._TAG, self.ctx, 'Projection operation not implemented yet.', 'copy')
return CdmProjection(self.ctx)
def get_name(self) -> str:
return 'projection'
@property
def object_type(self) -> 'CdmObjectType':
return CdmObjectType.PROJECTION_DEF
def is_derived_from(self, base: str, res_opt: Optional['ResolveOptions'] = None) -> bool:
# Since projections don't support inheritance, return false
return False
def validate(self) -> bool:
missing_fields = []
if not bool(self.source):
missing_fields.append('source')
if len(missing_fields) > 0:
logger.error(self._TAG, self.ctx, Errors.validate_error_string(self.at_corpus_path, missing_fields))
return False
return True
def visit(self, path_from: str, pre_children: 'VisitCallback', post_children: 'VisitCallback') -> bool:
path = ''
if not self.ctx.corpus._block_declared_path_changes:
path = self._declared_path
if not path:
path = path_from + 'projection'
self._declared_path = path
if pre_children and pre_children(self, path):
return False
if self.source:
if self.source.visit(path + '/source/', pre_children, post_children):
return True
result = False
if self.operations and len(self.operations) > 0:
# since self.Operations.VisitList results is non-unique attribute context paths if there are 2 or more operations of the same type.
# e.g. with composite keys
# the solution is to add a unique identifier to the path by adding the operation index or opIdx
for op_index in range(len(self.operations)):
self.operations[op_index]._index = op_index + 1
if self.operations[op_index] and self.operations[op_index].visit('{}/operation/index{}/'.format(path, op_index + 1), pre_children, post_children):
result = True
else:
result = False
if result:
return True
if post_children and post_children(self, path):
return True
return False
def _fetch_resolved_traits(self, res_opt: Optional['ResolveOptions'] = None) -> 'ResolvedTraitSet':
return self.source._fetch_resolved_traits(res_opt)
def _construct_projection_context(self, proj_directive: 'ProjectionDirective', attr_ctx: 'CdmAttributeContext') -> 'ProjectionContext':
"""
A function to construct projection context and populate the resolved attribute set that ExtractResolvedAttributes method can then extract
This function is the entry point for projection resolution.
This function is expected to do the following 3 things:
- Create a condition expression tree & default if appropriate
- Create and initialize Projection Context
- Process operations
"""
proj_context = None
if not self.condition:
# if no condition is provided, get default condition and persist
self.condition = ConditionExpression._get_default_condition_expression(self.operations, self.owner)
# create an expression tree based on the condition
tree = ExpressionTree()
self._condition_expression_tree_root = tree._construct_expression_tree(self.condition)
if not self._condition_expression_tree_root:
logger.info(self._TAG, self.ctx, 'Optional expression missing. Implicit expression will automatically apply.', CdmProjection._construct_projection_context.__name__)
if attr_ctx:
# Add projection to context tree
acp_proj = AttributeContextParameters()
acp_proj._under = attr_ctx
acp_proj._type = CdmAttributeContextType.PROJECTION
acp_proj._name = self.fetch_object_definition_name()
acp_proj._regarding = proj_directive._owner_ref
acp_proj._include_traits = False
ac_proj = CdmAttributeContext._create_child_under(proj_directive._res_opt, acp_proj)
acp_source = AttributeContextParameters()
acp_source._under = ac_proj
acp_source._type = CdmAttributeContextType.SOURCE
acp_source._name = 'source'
acp_source._regarding = None
acp_source._include_traits = False
ac_source = CdmAttributeContext._create_child_under(proj_directive._res_opt, acp_source)
if self.source.fetch_object_definition(proj_directive._res_opt).object_type == CdmObjectType.PROJECTION_DEF:
# A Projection
proj_context = self.source.explicit_reference._construct_projection_context(proj_directive, ac_source)
else:
# An Entity Reference
acp_source_projection = AttributeContextParameters()
acp_source_projection._under = ac_source
acp_source_projection._type = CdmAttributeContextType.ENTITY
acp_source_projection._name = self.source.named_reference if self.source.named_reference else self.source.explicit_reference.get_name()
acp_source_projection._regarding = self.source
acp_source_projection._include_traits = False
ras = self.source._fetch_resolved_attributes(proj_directive._res_opt, acp_source_projection)
# Initialize the projection context
ctx = proj_directive._owner.ctx if proj_directive._owner else None
pas_set = None
# if polymorphic keep original source as previous state
poly_source_set = None
if proj_directive._is_source_polymorphic:
poly_source_set = ProjectionResolutionCommonUtil._get_polymorphic_source_set(proj_directive, ctx, self.source, acp_source_projection)
# now initialize projection attribute state
pas_set = ProjectionResolutionCommonUtil._initialize_projection_attribute_state_set(
proj_directive,
ctx,
ras,
proj_directive._is_source_polymorphic,
poly_source_set
)
proj_context = ProjectionContext(proj_directive, ras.attribute_context)
proj_context._current_attribute_state_set = pas_set
is_condition_valid = False
if self._condition_expression_tree_root:
input = InputValues()
input.no_max_depth = proj_directive._has_no_maximum_depth
input.is_array = proj_directive._is_array
input.reference_only = proj_directive._is_reference_only
input.normalized = proj_directive._is_normalized
input.structured = proj_directive._is_structured
input.is_virtual = proj_directive._is_virtual
current_depth = proj_directive._current_depth
current_depth += 1
input.next_depth = current_depth
proj_directive._current_depth = current_depth
input.max_depth = proj_directive._maximum_depth
input.min_cardinality = proj_directive._cardinality._minimum_number if proj_directive._cardinality else None
input.max_cardinality = proj_directive._cardinality._maximum_number if proj_directive._cardinality else None
is_condition_valid = ExpressionTree._evaluate_expression_tree(self._condition_expression_tree_root, input)
if is_condition_valid and self.operations and len(self.operations) > 0:
# Just in case operations were added programmatically, reindex operations
for i in range(len(self.operations)):
self.operations[i]._index = i + 1
# Operation
acp_gen_attr_set = AttributeContextParameters()
acp_gen_attr_set._under = attr_ctx
acp_gen_attr_set._type = CdmAttributeContextType.GENERATED_SET
acp_gen_attr_set._name = '_generatedAttributeSet'
ac_gen_attr_set = CdmAttributeContext._create_child_under(proj_directive._res_opt, acp_gen_attr_set)
# Start with an empty list for each projection
pas_operations = ProjectionAttributeStateSet(proj_context._current_attribute_state_set._ctx)
for operation in self.operations:
# Evaluate projections and apply to empty state
new_pas_operations = operation._append_projection_attribute_state(proj_context, pas_operations, ac_gen_attr_set)
# If the operations fails or it is not implemented the projection cannot be evaluated so keep previous valid state.
if new_pas_operations is not None:
pas_operations = new_pas_operations
# Finally update the current state to the projection context
proj_context._current_attribute_state_set = pas_operations
return proj_context
def _extract_resolved_attributes(self, proj_ctx: 'ProjectionContext') -> 'ResolvedAttributeSet':
"""Create resolved attribute set based on the CurrentResolvedAttribute array"""
resolved_attribute_set = ResolvedAttributeSet()
resolved_attribute_set.attribute_context = proj_ctx._current_attribute_context
for pas in proj_ctx._current_attribute_state_set._states:
resolved_attribute_set.merge(pas._current_resolved_attribute, pas._current_resolved_attribute.att_ctx)
return resolved_attribute_set
| 48.04
| 176
| 0.690341
|
265bee6194b3a183bd0ebeab6d3b711a414c7d3a
| 12,398
|
py
|
Python
|
rfsoc_sam/quick_widgets.py
|
schelleg/rfsoc_sam
|
2a4ae0fc1686d1e3146cbea5d8578c7432910b92
|
[
"BSD-3-Clause"
] | 39
|
2020-02-22T00:40:51.000Z
|
2022-03-30T00:39:45.000Z
|
rfsoc_sam/quick_widgets.py
|
schelleg/rfsoc_sam
|
2a4ae0fc1686d1e3146cbea5d8578c7432910b92
|
[
"BSD-3-Clause"
] | 7
|
2021-01-19T18:46:19.000Z
|
2022-03-10T10:25:43.000Z
|
rfsoc_sam/quick_widgets.py
|
schelleg/rfsoc_sam
|
2a4ae0fc1686d1e3146cbea5d8578c7432910b92
|
[
"BSD-3-Clause"
] | 19
|
2020-02-25T10:42:51.000Z
|
2021-12-15T06:40:41.000Z
|
__author__ = "David Northcote"
__organisation__ = "The University of Strathclyde"
__support__ = "https://github.com/strath-sdr/rfsoc_sam"
import ipywidgets as ipw
class Label():
"""Helper class for label widgets.
"""
def __init__(self,
value,
svalue='',
evalue='',
dict_id=''):
self._svalue = svalue
self._evalue = evalue
self._label = ipw.Label(value=''.join([self._svalue,value,self._evalue]))
self._dict_id = dict_id
@property
def value(self):
return self._label.value
@value.setter
def value(self, value):
self._label.value = ''.join([self._svalue,value,self._evalue])
def get_widget(self):
return self._label
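# Usage sketch (illustrative only, not part of the original module). The widget
# values below are hypothetical; Label simply joins prefix + value + suffix into
# an ipywidgets Label.
def _label_example():
    from IPython.display import display
    freq_label = Label(value='2400', svalue='Centre Frequency: ', evalue=' MHz',
                       dict_id='centre_frequency')
    display(freq_label.get_widget())
    freq_label.value = '2450'  # text becomes 'Centre Frequency: 2450 MHz'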
class DropDown():
"""Helper class for dropdown widgets.
"""
def __init__(self,
callback,
options,
value,
description,
dict_id = '',
description_width='150px',
layout_width='300px'):
def on_value_change(change):
callback({self._dict_id : change['new']})
self._dict_id = dict_id
self._dropdown = ipw.Dropdown(options=options,
value=value,
description=description,
style={'description_width': description_width},
layout = {'width': layout_width},)
self._dropdown.observe(on_value_change, names='value')
@property
def value(self):
return self._dropdown.value
@value.setter
def value(self, value):
self._dropdown.value = value
def get_widget(self):
return self._dropdown
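# Usage sketch (illustrative only). The callback receives a one-entry dict of
# {dict_id: new_value}; the option strings below are made up for illustration.
def _dropdown_example():
    def on_change(config):
        print(config)  # e.g. {'window': 'Blackman'}
    window = DropDown(callback=on_change,
                      options=['Rectangular', 'Hanning', 'Blackman'],
                      value='Hanning',
                      description='Window Type:',
                      dict_id='window')
    return window.get_widget()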
class CheckBox():
"""Helper class for CheckBox widgets.
"""
def __init__(self,
callback,
value,
description,
indent=True,
dict_id = '',
description_width='150px',
layout_width='300px'):
def on_value_change(change):
callback({self._dict_id : change['new']})
self._dict_id = dict_id
self._checkbox = ipw.Checkbox(value=value,
description=description,
indent=indent,
style={'description_width': description_width},
layout = {'width': layout_width},)
self._checkbox.observe(on_value_change, names='value')
@property
def value(self):
return self._checkbox.value
@value.setter
def value(self, value):
self._checkbox.value = value
def get_widget(self):
return self._checkbox
class FloatText():
"""Helper class for float text widgets.
"""
def __init__(self,
callback,
value,
min_value,
max_value,
step,
description,
dict_id = '',
description_width='150px',
layout_width='300px'):
def on_value_change(change):
callback({self._dict_id : change['new']})
self._dict_id = dict_id
self._text_box = ipw.BoundedFloatText(
value=value,
min=min_value,
max=max_value,
step=step,
description=description,
continuous_update=False,
style={'description_width': description_width},
layout = {'width': layout_width},
disabled=False
)
self._text_box.observe(on_value_change, names='value')
@property
def value(self):
return self._text_box.value
@value.setter
def value(self, value):
self._text_box.value = value
@property
def step(self):
return self._text_box.step
@step.setter
def step(self, step):
self._text_box.step = step
def get_widget(self):
return self._text_box
class IntText():
"""Helper class for integer text widgets.
"""
def __init__(self,
callback,
value,
min_value,
max_value,
step,
description,
dict_id = '',
description_width='150px',
layout_width='300px'):
def on_value_change(change):
callback({self._dict_id : change['new']})
self._dict_id = dict_id
self._text_box = ipw.BoundedIntText(
value=value,
min=min_value,
max=max_value,
step=step,
description=description,
continuous_update=False,
style={'description_width': description_width},
layout = {'width': layout_width},
disabled=False
)
self._text_box.observe(on_value_change, names='value')
@property
def value(self):
return self._text_box.value
@value.setter
def value(self, value):
self._text_box.value = value
@property
def step(self):
return self._text_box.step
@step.setter
def step(self, step):
self._text_box.step = step
def get_widget(self):
return self._text_box
class Button():
"""Helper class for button widgets.
"""
def __init__(self,
callback,
description_on = ' ',
description_off = ' ',
state = True,
dict_id = ''):
self._state = state
self._dict_id = dict_id
self._callback = callback
self._button_colour = 'green'
self._description_on = description_on
self._description_off = description_off
self._button = ipw.Button(description=self._description_on if self._state else self._description_off,
layout=ipw.Layout(margin='auto',
border='none'))
self._button.on_click(lambda _: self.on_click())
if self._state:
self._button.style.button_color = self.button_colour
else:
self._button.style.button_color = 'rgb(128, 128, 128)'
@property
def button_colour(self):
return self._button_colour
@button_colour.setter
def button_colour(self, button_colour):
self._button_colour = button_colour
if self._state:
self._button.style.button_color = self._button_colour
@property
def value(self):
return self._state
@value.setter
def value(self, state):
self._state = state
if self._state:
self._button.style.button_color = self.button_colour
self._button.description = self._description_on
else:
self._button.style.button_color = 'rgb(128, 128, 128)'
self._button.description = self._description_off
def configure_state(self, state):
self.value = state
self._callback({self._dict_id : self._state})
def on_click(self):
self._state = not self._state
self._callback({self._dict_id : self._state})
if self._state:
self._button.style.button_color = self.button_colour
self._button.description = self._description_on
else:
self._button.style.button_color = 'rgb(128, 128, 128)'
self._button.description = self._description_off
def get_widget(self):
return self._button
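# Usage sketch (illustrative only). Each click flips the internal state and
# reports it through the callback; the dict_id below is hypothetical.
def _button_example():
    def on_toggle(config):
        print(config)  # e.g. {'transmit_enable': False}
    button = Button(callback=on_toggle,
                    description_on='On',
                    description_off='Off',
                    state=True,
                    dict_id='transmit_enable')
    return button.get_widget()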
class QuickButton():
"""Helper class for button widgets.
"""
def __init__(self,
callback,
description_on = ' ',
description_off = ' ',
state = True,
dict_id = ''):
self._state = state
self._dict_id = dict_id
self._callback = callback
self._button_colour = 'green'
self._description_on = description_on
self._description_off = description_off
self._button = ipw.Button(description=self._description_on if self._state else self._description_off,
layout=ipw.Layout(margin='auto',
border='none'))
self._button.on_click(lambda _: self.on_click())
if self._state:
self._button.style.button_color = self.button_colour
else:
self._button.style.button_color = 'rgb(128, 128, 128)'
@property
def button_colour(self):
return self._button_colour
@button_colour.setter
def button_colour(self, button_colour):
self._button_colour = button_colour
if self._state:
self._button.style.button_color = self._button_colour
@property
def value(self):
return self._state
@value.setter
def value(self, state):
self._state = state
if self._state:
self._button.style.button_color = self.button_colour
self._button.description = self._description_on
else:
self._button.style.button_color = 'rgb(128, 128, 128)'
self._button.description = self._description_off
def on_click(self):
if self._state:
self._button.style.button_color = 'rgb(128, 128, 128)'
self._button.description = self._description_off
self._callback()
self._button.style.button_color = self.button_colour
self._button.description = self._description_on
else:
self._button.style.button_color = self.button_colour
self._button.description = self._description_on
self._callback()
self._button.style.button_color = 'rgb(128, 128, 128)'
self._button.description = self._description_off
def get_widget(self):
return self._button
class Accordion():
"""Helper class for accordion widgets.
"""
def __init__(self,
title,
widgets,
callback=None,
dict_id='',
selected_index=0):
def on_value_change(change):
if self._callback is not None:
if change['new'] is None:
self._callback(self._dict_id, change['new'])
self._title = title
self._widgets = widgets
self._vbox = ipw.VBox([])
self._callback = callback
for i in range(len(self._widgets)):
self._vbox.children+=(self._widgets[i],)
self._accordion = ipw.Accordion(children=[self._vbox],
layout=ipw.Layout(width='auto'),
selected_index=selected_index)
self._accordion.set_title(0, self._title)
self._accordion.observe(callback, names='selected_index')
@property
def selected_index(self):
return self._accordion.selected_index
@selected_index.setter
def selected_index(self, selected_index):
self._accordion.selected_index = selected_index
def get_widget(self):
return self._accordion
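# Usage sketch (illustrative only). Groups already-created ipywidgets (for
# example the objects returned by the helpers' get_widget() methods above)
# under one collapsible section; the title is a placeholder.
def _accordion_example(child_widgets):
    def on_fold(change):
        pass  # receives the raw ipywidgets change dict for 'selected_index'
    acc = Accordion(title='Receiver Controls',
                    widgets=child_widgets,
                    callback=on_fold,
                    dict_id='receiver_controls')
    return acc.get_widget()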
class Image():
"""Helper class for displaying images.
"""
def __init__(self,
image_file,
image_format='png',
width=200,
height=200):
        with open(image_file, "rb") as file:
            image = file.read()
self._image = ipw.Image(value=image,
format=image_format,
width=width,
height=height)
def update_image(self, image_file):
        with open(image_file, "rb") as file:
            image = file.read()
self._image.value = image
def get_widget(self):
return self._image
| 30.461916
| 109
| 0.532344
|
543287c1e4538b1cb8dfb7b35d7a47ad8569d28f
| 2,659
|
py
|
Python
|
yanxidemo/yanxi.py
|
MisterZhouZhou/python3demo
|
da0b6771cc12e8e1066a115c3f72a90c100108ac
|
[
"Apache-2.0"
] | 3
|
2019-03-04T08:39:57.000Z
|
2019-12-06T08:29:47.000Z
|
yanxidemo/yanxi.py
|
MisterZhouZhou/python3demo
|
da0b6771cc12e8e1066a115c3f72a90c100108ac
|
[
"Apache-2.0"
] | null | null | null |
yanxidemo/yanxi.py
|
MisterZhouZhou/python3demo
|
da0b6771cc12e8e1066a115c3f72a90c100108ac
|
[
"Apache-2.0"
] | null | null | null |
from urllib import request
from bs4 import BeautifulSoup
from wordcloud import WordCloud,ImageColorGenerator,STOPWORDS
import collections
import matplotlib.pyplot as plt
def getHtml():
url = r'http://www.tvzn.com/14784/yanyuanbiao.html'
    # Pretend to be a real browser when requesting the page
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
page = request.Request(url, headers=headers)
page_info = request.urlopen(page).read()
page_info = page_info.decode('utf-8')
return page_info
def getActorNames(contents1, contents2):
actorNamesList = []
for content in contents1:
mainActorName = content.find("p", class_="mh-title").find("a", class_="mh-actor").string
actorNamesList.append(mainActorName)
for content in contents2:
notMainActorName = content.find("p", class_="mh-l").find(class_="mh-actor").string
actorNamesList.append(notMainActorName)
return actorNamesList
def collectionNames(namelist):
surnamelist = []
givennamelist = []
surname_dict = {}
    for actorname in namelist:
surnamelist.append(actorname[0])
for givenname in actorname[2:]:
givennamelist.append(givenname)
if actorname[0] not in surname_dict:
surname_dict[actorname[0]] = 1
else:
surname_dict[actorname[0]] += 1
return surnamelist, givennamelist, surname_dict
def wordCount(surenamelist):
word_count = collections.Counter(surenamelist)
backgroud_Image = plt.imread('test1.jpg')
    # Configure the word cloud
    wc = WordCloud(font_path="/Users/zhouwei/Desktop/zw/python3/yanxidemo/simsun.ttc",  # font file
                   background_color="white",  # background colour
                   max_words=2000,  # maximum number of words to display
                   mask=backgroud_Image,  # background image mask
                   max_font_size=100,  # maximum font size
stopwords=STOPWORDS,
random_state=30,
width=1000, height=860)
wc.generate_from_frequencies(word_count)
img_colors = ImageColorGenerator(backgroud_Image)
wc.recolor(color_func=img_colors)
plt.imshow(wc)
plt.axis('off')
plt.show()
if __name__ == '__main__':
html = getHtml()
    # Parse the fetched HTML with BeautifulSoup, using html.parser as the parser
soup = BeautifulSoup(html, 'html.parser')
    # Fetch the leading-cast entries
contents1 = soup.find('ul', class_="gclearfix").findAll("li")
    # Fetch the supporting-cast entries
contents2 = soup.find('ul', class_="glearfix").findAll("li")
nameList = getActorNames(contents1, contents2)
surnamelist, givennamelist, surname_dict = collectionNames(nameList)
wordCount(surnamelist)
| 34.089744
| 134
| 0.66604
|
65487ac6bc96018c017cba330330a5f730b93d71
| 221
|
py
|
Python
|
job_app/celeryconfig.py
|
ahmedezzeldin93/heyjobs
|
ada72a4ede5eabf04f465ecd0b5f677253e95579
|
[
"MIT"
] | null | null | null |
job_app/celeryconfig.py
|
ahmedezzeldin93/heyjobs
|
ada72a4ede5eabf04f465ecd0b5f677253e95579
|
[
"MIT"
] | null | null | null |
job_app/celeryconfig.py
|
ahmedezzeldin93/heyjobs
|
ada72a4ede5eabf04f465ecd0b5f677253e95579
|
[
"MIT"
] | null | null | null |
broker_url = 'pyamqp://localhost:5672'
task_serializer = 'json'
result_serializer = 'json'
accept_content = ['json']
timezone = 'Europe/Berlin'
enable_utc = True
CELERY_IMPORTS=("job_app.tasks")
CELERY_IGNORE_RESULT=False
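# Usage sketch (illustrative only): a Celery app would normally load this
# module via config_from_object, e.g.
#   from celery import Celery
#   app = Celery('job_app')
#   app.config_from_object('job_app.celeryconfig')
# The broker URL above assumes a RabbitMQ instance on localhost:5672.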
| 27.625
| 38
| 0.778281
|
35bba7f7eae15fd12f54b2b51b4bc56f7001158b
| 590
|
py
|
Python
|
2020/src_39th_week.py
|
fabichoi/1d1p
|
e978ed5f385114598ef2298385ed19ae61b06f5c
|
[
"MIT"
] | null | null | null |
2020/src_39th_week.py
|
fabichoi/1d1p
|
e978ed5f385114598ef2298385ed19ae61b06f5c
|
[
"MIT"
] | 1
|
2021-08-29T14:31:17.000Z
|
2021-08-29T14:31:17.000Z
|
2020/src_39th_week.py
|
fabichoi/1d1p
|
e978ed5f385114598ef2298385ed19ae61b06f5c
|
[
"MIT"
] | null | null | null |
# TopCoder : FriendScore
class FriendScore:
def highestScore(self, friends):
ans = 0
n = len(friends[0])
for i in range(n):
cnt = 0
for j in range(n):
if i == j:
continue
if friends[i][j] == 'Y':
cnt += 1
else:
for k in range(n):
if friends[j][k] == 'Y' and friends[k][i] == 'Y':
cnt += 1
break
ans = max(ans, cnt)
return ans
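# Quick sanity check; the adjacency matrix below is a made-up example, not an
# official TopCoder test case.
if __name__ == '__main__':
    friends = ["NNY",
               "NNY",
               "YYN"]
    # 0 and 1 are friends with 2 directly and with each other through 2, so
    # every person has a score of 2.
    print(FriendScore().highestScore(friends))  # prints 2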
| 24.583333
| 73
| 0.345763
|
213ca6c7cee21fa66c4e77d089af006f4b88e9af
| 975
|
py
|
Python
|
chapter-4/charter/renderers/pdf/line_series.py
|
lucasbyAI/Modular-Programming-with-Python
|
387bff88e256eb2a4c3be2207908be274270be90
|
[
"MIT"
] | 38
|
2016-06-07T07:52:00.000Z
|
2022-02-08T21:29:02.000Z
|
chapter-4/charter/renderers/pdf/line_series.py
|
lucasbyAI/Modular-Programming-with-Python
|
387bff88e256eb2a4c3be2207908be274270be90
|
[
"MIT"
] | null | null | null |
chapter-4/charter/renderers/pdf/line_series.py
|
lucasbyAI/Modular-Programming-with-Python
|
387bff88e256eb2a4c3be2207908be274270be90
|
[
"MIT"
] | 35
|
2016-06-17T07:39:25.000Z
|
2022-02-02T18:18:49.000Z
|
# charter.renderers.pdf.line_series
#
# Renderer for drawing a line series onto the chart in PDF format.
from ...constants import *
#############################################################################
def draw(chart, canvas):
avail_width = CHART_WIDTH - Y_AXIS_WIDTH - MARGIN
bucket_width = avail_width / len(chart['x_axis'])
bottom = X_AXIS_HEIGHT
max_top = CHART_HEIGHT - TITLE_HEIGHT
avail_height = max_top - bottom
left = Y_AXIS_WIDTH
prev_y = None
for y_value in chart['series']:
y = ((y_value - chart['y_min']) /
(chart['y_max'] - chart['y_min']))
cur_y = bottom + int(y * avail_height)
        if prev_y is not None:
canvas.setStrokeColorRGB(0.25, 0.25, 0.625)
canvas.setLineWidth(1)
canvas.line(left - bucket_width / 2, prev_y,
left + bucket_width / 2, cur_y)
prev_y = cur_y
left = left + bucket_width
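#############################################################################
# Usage sketch (illustrative only): the chart dictionary and canvas are built
# elsewhere in the charter package, so the values below are placeholders.
#
#   from reportlab.pdfgen.canvas import Canvas
#   chart = {'x_axis': ['Q1', 'Q2', 'Q3', 'Q4'],
#            'series': [10, 25, 15, 30],
#            'y_min': 0, 'y_max': 30}
#   draw(chart, Canvas('chart.pdf'))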
| 28.676471
| 77
| 0.548718
|
23e3dc35b2733751a5984742fc0105c7314e08bc
| 22,744
|
py
|
Python
|
pyjade/lexer.py
|
edemocracy/pyjade
|
4a4f52235795fd6367181a27a6bc1919acc7a2c5
|
[
"MIT"
] | 2
|
2019-02-27T10:36:17.000Z
|
2019-02-27T13:18:26.000Z
|
pyjade/lexer.py
|
edemocracy/pyjade
|
4a4f52235795fd6367181a27a6bc1919acc7a2c5
|
[
"MIT"
] | null | null | null |
pyjade/lexer.py
|
edemocracy/pyjade
|
4a4f52235795fd6367181a27a6bc1919acc7a2c5
|
[
"MIT"
] | 1
|
2018-06-20T07:43:53.000Z
|
2018-06-20T07:43:53.000Z
|
from __future__ import absolute_import
import re
from collections import deque
import six
class Token:
def __init__(self, **kwds):
self.buffer = None
self.__dict__.update(kwds)
def __str__(self):
return self.__dict__.__str__()
def regexec(regex, input):
matches = regex.match(input)
if matches:
return (input[matches.start():matches.end()],) + matches.groups()
return None
def detect_closing_bracket(string):
count = 0
pos = string.find('[')
while True:
if string[pos] == '[':
count += 1
if string[pos] == ']':
count -= 1
pos += 1
if count == 0:
return pos
def replace_string_brackets(splitted_string):
sval_replaced = []
old_delim = None
for i in splitted_string:
if old_delim is None:
sval_replaced.append(i)
if i in ('"', "'"):
old_delim = i
continue
if i in ('"', "'"):
if i == old_delim:
old_delim = None
sval_replaced.append(i)
continue
sval_replaced.append(re.sub(r'\[|\]', '*', i))
return ''.join(sval_replaced)
class Lexer(object):
RE_INPUT = re.compile(r'\r\n|\r')
RE_COMMENT = re.compile(r'^ *\/\/(-)?([^\n]*)')
RE_TAG = re.compile(r'^(\w[-:\w]*)')
RE_DOT_BLOCK_START = re.compile(r'^\.\n')
RE_FILTER = re.compile(r'^:(\w+)')
RE_DOCTYPE = re.compile(r'^(?:!!!|doctype) *([^\n]+)?')
RE_ID = re.compile(r'^#([\w-]+)')
RE_CLASS = re.compile(r'^\.([\w-]+)')
RE_STRING = re.compile(r'^(?:\| ?)([^\n]+)')
RE_TEXT = re.compile(r'^([^\n]+)')
RE_EXTENDS = re.compile(r'^extends? +([^\n]+)')
RE_PREPEND = re.compile(r'^prepend +([^\n]+)')
RE_APPEND = re.compile(r'^append +([^\n]+)')
RE_BLOCK = re.compile(r'''^block(( +(?:(prepend|append) +)?([^\n]*))|\n)''')
RE_YIELD = re.compile(r'^yield *')
RE_INCLUDE = re.compile(r'^include +([^\n]+)')
RE_ASSIGNMENT = re.compile(r'^(-\s+var\s+)?(\w+) += *([^;\n]+)( *;? *)')
RE_MIXIN = re.compile(r'^mixin +([-\w]+)(?: *\((.*)\))?')
RE_CALL = re.compile(r'^\+\s*([-.\w]+)(?: *\((.*)\))?')
RE_CONDITIONAL = re.compile(r'^(?:- *)?(if|unless|else if|elif|else)\b([^\n]*)')
RE_BLANK = re.compile(r'^\n *\n')
# RE_WHILE = re.compile(r'^while +([^\n]+)')
RE_EACH = re.compile(r'^(?:- *)?(?:each|for) +([\w, ]+) +in +([^\n]+)')
RE_CODE = re.compile(r'^(!?=|-)([^\n]+)')
RE_ATTR_INTERPOLATE = re.compile(r'#\{([^}]+)\}')
RE_ATTR_PARSE = re.compile(r'''^['"]|['"]$''')
RE_INDENT_TABS = re.compile(r'^\n(\t*) *')
RE_INDENT_SPACES = re.compile(r'^\n( *)')
RE_COLON = re.compile(r'^: *')
RE_INLINE = re.compile(r'(?<!\\)#\[')
RE_INLINE_ESCAPE = re.compile(r'\\#\[')
STRING_SPLITS = re.compile(r'([\'"])(.*?)(?<!\\)(\1)')
def __init__(self, string, **options):
if isinstance(string, six.binary_type):
string = six.text_type(string, 'utf8')
self.options = options
self.input = self.RE_INPUT.sub('\n', string)
self.colons = self.options.get('colons', False)
self.deferredTokens = deque()
self.lastIndents = 0
self.lineno = 1
self.stash = deque()
self.indentStack = deque()
self.indentRe = None
self.pipeless = False
self.isTextBlock = False
def tok(self, type, val=None):
return Token(type=type, line=self.lineno, val=val, inline_level=self.options.get('inline_level', 0))
def consume(self, len):
self.input = self.input[len:]
def scan(self, regexp, type):
captures = regexec(regexp, self.input)
# print regexp,type, self.input, captures
if captures:
# print captures
self.consume(len(captures[0]))
# print 'a',self.input
if len(captures) == 1:
return self.tok(type, None)
return self.tok(type, captures[1])
def defer(self, tok):
self.deferredTokens.append(tok)
def lookahead(self, n):
# print self.stash
fetch = n - len(self.stash)
while True:
fetch -= 1
if not fetch >= 0:
break
self.stash.append(self.next())
return self.stash[n - 1]
def indexOfDelimiters(self, start, end):
str, nstart, nend, pos = self.input, 0, 0, 0
for i, s in enumerate(str):
if start == s:
nstart += 1
elif end == s:
nend += 1
if nend == nstart:
pos = i
break
return pos
def stashed(self):
# print self.stash
return len(self.stash) and self.stash.popleft()
def deferred(self):
return len(self.deferredTokens) and self.deferredTokens.popleft()
def eos(self):
# print 'eos',bool(self.input)
if self.input:
return
if self.indentStack:
self.indentStack.popleft()
return self.tok('outdent')
else:
return self.tok('eos')
def consumeBlank(self):
captures = regexec(self.RE_BLANK, self.input)
if not captures:
return
self.lineno += 1
self.consume(len(captures[0]) - 1)
return captures
def blank(self):
if self.pipeless:
return
if self.consumeBlank():
return self.next()
def comment(self):
captures = regexec(self.RE_COMMENT, self.input)
if captures:
self.consume(len(captures[0]))
tok = self.tok('comment', captures[2])
tok.buffer = '-' != captures[1]
return tok
def tag(self):
captures = regexec(self.RE_TAG, self.input)
# print self.input,captures,re.match('^(\w[-:\w]*)',self.input)
if captures:
self.consume(len(captures[0]))
name = captures[1]
if name.endswith(':'):
name = name[:-1]
tok = self.tok('tag', name)
self.defer(self.tok(':'))
while self.input[0] == ' ':
self.input = self.input[1:]
else:
tok = self.tok('tag', name)
return tok
def textBlockStart(self):
captures = regexec(self.RE_DOT_BLOCK_START, self.input)
if captures is None:
return
if len(self.indentStack) > 0:
self.textBlockTagIndent = self.indentStack[0]
else:
self.textBlockTagIndent = 0
self.consume(1)
self.isTextBlock = True
return self.textBlockContinue(isStart=True)
def textBlockContinue(self, isStart=False):
if not self.isTextBlock:
return
tokens = deque()
while True:
if self.consumeBlank():
if not isStart:
tokens.append(self.tok('string', ''))
continue
eos = self.eos()
if eos is not None:
if isStart:
return eos
tokens.append(eos)
break
nextIndent = self.captureIndent()
if nextIndent is None or len(nextIndent[1]) <= self.textBlockTagIndent:
self.isTextBlock = False
if isStart:
return self.tok('newline')
break
padding = 0
if not isStart and len(nextIndent[1]) > self.textBlockIndent:
padding = len(nextIndent[1]) - self.textBlockIndent
self.consume(1 + padding)
self.input = '\n' + self.input
indent = self.indent()
if isStart:
self.textBlockIndent = indent.val
padding = 0
itoks = self.scanInline(self.RE_TEXT, 'string')
indentChar = self.indentRe == self.RE_INDENT_TABS and '\t' or ' '
if itoks:
itoks[0].val = (indentChar * padding) + itoks[0].val
if isStart:
for tok in itoks or []:
self.defer(tok)
return indent
tokens.extend(itoks)
if not tokens:
firstTok = None
else:
firstTok = tokens.popleft()
while tokens:
if tokens[-1].type == 'string' and not tokens[-1].val:
tokens.pop()
continue
self.defer(tokens.popleft())
self.isTextBlock = False
return firstTok
def filter(self):
return self.scan(self.RE_FILTER, 'filter')
def doctype(self):
# print self.scan(self.RE_DOCTYPE, 'doctype')
return self.scan(self.RE_DOCTYPE, 'doctype')
def id(self):
return self.scan(self.RE_ID, 'id')
def className(self):
return self.scan(self.RE_CLASS, 'class')
def processInline(self, val):
sval = self.STRING_SPLITS.split(val)
sval_stripped = [i.strip() for i in sval]
if sval_stripped.count('"') % 2 != 0 or sval_stripped.count("'") % 2 != 0:
raise Exception('Unbalanced quotes found inside inline jade at line %s.' % self.lineno)
sval_replaced = replace_string_brackets(sval)
start_inline = self.RE_INLINE.search(sval_replaced).start()
try:
closing = start_inline + detect_closing_bracket(sval_replaced[start_inline:])
except IndexError:
raise Exception('The end of the string was reached with no closing bracket found at line %s.' % self.lineno)
textl = val[:start_inline]
code = val[start_inline:closing][2:-1]
textr = val[closing:]
toks = deque()
toks.append(self.tok('string', self.RE_INLINE_ESCAPE.sub('#[', textl)))
ilexer = InlineLexer(code, inline_level=self.options.get('inline_level', 0) + 1)
while True:
tok = ilexer.advance()
if tok.type == 'eos':
break
toks.append(tok)
if self.RE_INLINE.search(textr):
toks.extend(self.processInline(textr))
else:
toks.append(self.tok('string', self.RE_INLINE_ESCAPE.sub('#[', textr)))
return toks
def scanInline(self, regexp, type):
ret = self.scan(regexp, type)
if ret is None:
return ret
if self.RE_INLINE.search(ret.val):
ret = self.processInline(ret.val)
if ret:
ret[0].val = ret[0].val.lstrip()
else:
ret.val = self.RE_INLINE_ESCAPE.sub('#[', ret.val)
ret = deque([ret])
return ret
def scanInlineProcess(self, regexp, type_):
toks = self.scanInline(regexp, type_)
if not toks:
return None
firstTok = toks.popleft()
for tok in toks:
self.defer(tok)
return firstTok
def string(self):
return self.scanInlineProcess(self.RE_STRING, 'string')
def text(self):
return self.scanInlineProcess(self.RE_TEXT, 'text')
def extends(self):
return self.scan(self.RE_EXTENDS, 'extends')
def prepend(self):
captures = regexec(self.RE_PREPEND, self.input)
if captures:
self.consume(len(captures[0]))
mode, name = 'prepend', captures[1]
tok = self.tok('block', name)
tok.mode = mode
return tok
def append(self):
captures = regexec(self.RE_APPEND, self.input)
if captures:
self.consume(len(captures[0]))
mode, name = 'append', captures[1]
tok = self.tok('block', name)
tok.mode = mode
return tok
def block(self):
captures = regexec(self.RE_BLOCK, self.input)
if captures:
self.consume(len(captures[0]))
mode = captures[3] or 'replace'
name = captures[4] or ''
tok = self.tok('block', name)
tok.mode = mode
return tok
def _yield(self):
return self.scan(self.RE_YIELD, 'yield')
def include(self):
return self.scan(self.RE_INCLUDE, 'include')
def assignment(self):
captures = regexec(self.RE_ASSIGNMENT, self.input)
if captures:
self.consume(len(captures[0]))
name, val = captures[2:4]
tok = self.tok('assignment')
tok.name = name
tok.val = val
return tok
def mixin(self):
captures = regexec(self.RE_MIXIN, self.input)
if captures:
self.consume(len(captures[0]))
tok = self.tok('mixin', captures[1])
tok.args = captures[2]
return tok
def call(self):
captures = regexec(self.RE_CALL, self.input)
if captures:
self.consume(len(captures[0]))
tok = self.tok('call', captures[1])
tok.args = captures[2]
return tok
def conditional(self):
captures = regexec(self.RE_CONDITIONAL, self.input)
if captures:
self.consume(len(captures[0]))
type, sentence = captures[1:]
tok = self.tok('conditional', type)
tok.sentence = sentence
return tok
# def _while(self):
# captures = regexec(self.RE_WHILE,self.input)
# if captures:
# self.consume(len(captures[0]))
# return self.tok('code','while(%s)'%captures[1])
def each(self):
captures = regexec(self.RE_EACH, self.input)
if captures:
self.consume(len(captures[0]))
tok = self.tok('each', None)
tok.keys = [x.strip() for x in captures[1].split(',')]
tok.code = captures[2]
return tok
def code(self):
captures = regexec(self.RE_CODE, self.input)
if captures:
self.consume(len(captures[0]))
flags, name = captures[1:]
tok = self.tok('code', name)
tok.escape = flags.startswith('=')
#print captures
tok.buffer = '=' in flags
# print tok.buffer
return tok
def attrs(self):
if '(' == self.input[0]:
index = self.indexOfDelimiters('(', ')')
string = self.input[1:index]
tok = self.tok('attrs')
l = len(string)
colons = self.colons
states = ['key']
class Namespace:
key = u''
val = u''
quote = u''
literal = True
def reset(self):
self.key = self.val = self.quote = u''
self.literal = True
def __str__(self):
return dict(key=self.key, val=self.val, quote=self.quote,
literal=self.literal).__str__()
ns = Namespace()
def state():
return states[-1]
def interpolate(attr):
attr, num = self.RE_ATTR_INTERPOLATE.subn(lambda matchobj: '%s+"{}".format(%s)+%s' % (ns.quote, matchobj.group(1), ns.quote), attr)
return attr, (num > 0)
self.consume(index + 1)
from .utils import odict
tok.attrs = odict()
tok.static_attrs = set()
str_nums = list(map(str, range(10)))
# print '------'
def parse(c):
real = c
if colons and ':' == c:
c = '='
ns.literal = ns.literal and (state() not in ('object', 'array',
'expr'))
# print ns, c, states
if c in (',', '\n') or (c == ' ' and state() == 'val' and len(states) == 2 and ns.val.strip()):
s = state()
if s in ('expr', 'array', 'string', 'object'):
ns.val += c
else:
states.append('key')
ns.val = ns.val.strip()
ns.key = ns.key.strip()
if not ns.key:
return
# ns.literal = ns.quote
if not ns.literal:
if '!' == ns.key[-1]:
ns.literal = True
ns.key = ns.key[:-1]
ns.key = ns.key.strip("'\"")
if not ns.val:
tok.attrs[ns.key] = True
else:
tok.attrs[ns.key], is_interpolated = interpolate(ns.val)
ns.literal = ns.literal and not is_interpolated
if ns.literal:
tok.static_attrs.add(ns.key)
ns.reset()
elif '=' == c:
s = state()
if s == 'key char':
ns.key += real
elif s in ('val', 'expr', 'array', 'string', 'object'):
ns.val += real
else:
states.append('val')
elif '(' == c:
if state() in ('val', 'expr'):
states.append('expr')
ns.val += c
elif ')' == c:
if state() in ('val', 'expr'):
states.pop()
ns.val += c
elif '{' == c:
if 'val' == state():
states.append('object')
ns.val += c
elif '}' == c:
if 'object' == state():
states.pop()
ns.val += c
elif '[' == c:
if 'val' == state():
states.append('array')
ns.val += c
elif ']' == c:
if 'array' == state():
states.pop()
ns.val += c
elif c in ('"', "'"):
s = state()
if 'key' == s:
states.append('key char')
elif 'key char' == s:
states.pop()
elif 'string' == s:
if c == ns.quote:
states.pop()
ns.val += c
else:
states.append('string')
ns.val += c
ns.quote = c
elif '' == c:
pass
else:
s = state()
ns.literal = ns.literal and (s in ('key', 'string') or c in str_nums)
# print c, s, ns.literal
if s in ('key', 'key char'):
ns.key += c
else:
ns.val += c
for char in string:
parse(char)
parse(',')
return tok
def captureIndent(self):
if self.indentRe:
captures = regexec(self.indentRe, self.input)
else:
regex = self.RE_INDENT_TABS
captures = regexec(regex, self.input)
if captures and not captures[1]:
regex = self.RE_INDENT_SPACES
captures = regexec(regex, self.input)
if captures and captures[1]:
self.indentRe = regex
return captures
def indent(self):
captures = self.captureIndent()
if captures:
indents = len(captures[1])
self.lineno += 1
self.consume(indents + 1)
if not self.input:
return self.tok('newline')
if self.input[0] in (' ', '\t'):
raise Exception('Invalid indentation, you can use tabs or spaces but not both')
if '\n' == self.input[0]:
return self.tok('newline')
if self.indentStack and indents < self.indentStack[0]:
while self.indentStack and self.indentStack[0] > indents:
self.stash.append(self.tok('outdent'))
self.indentStack.popleft()
tok = self.stash.pop()
elif indents and (not self.indentStack or indents != self.indentStack[0]):
self.indentStack.appendleft(indents)
tok = self.tok('indent', indents)
else:
tok = self.tok('newline')
return tok
def pipelessText(self):
if self.pipeless:
if '\n' == self.input[0]:
return
i = self.input.find('\n')
if -1 == i:
i = len(self.input)
str = self.input[:i]
self.consume(len(str))
return self.tok('text', str)
def colon(self):
return self.scan(self.RE_COLON, ':')
def advance(self):
return self.stashed() or self.next()
def next(self):
return self.deferred() \
or self.textBlockContinue() \
or self.blank() \
or self.eos() \
or self.pipelessText() \
or self._yield() \
or self.doctype() \
or self.extends() \
or self.append() \
or self.prepend() \
or self.block() \
or self.include() \
or self.mixin() \
or self.call() \
or self.conditional() \
or self.each() \
or self.assignment() \
or self.tag() \
or self.textBlockStart() \
or self.filter() \
or self.code() \
or self.id() \
or self.className() \
or self.attrs() \
or self.indent() \
or self.comment() \
or self.colon() \
or self.string() \
or self.text()
##or self._while() \
class InlineLexer(Lexer):
def next(self):
return self.deferred() \
or self.blank() \
or self.eos() \
or self.pipelessText() \
or self.mixin() \
or self.call() \
or self.assignment() \
or self.tag() \
or self.code() \
or self.id() \
or self.className() \
or self.attrs() \
or self.colon() \
or self.string() \
or self.text()
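# Usage sketch (illustrative only): tokenise a small, made-up template and
# print the resulting token stream.
def _lexer_example():
    lexer = Lexer('div#main.content\n  p Hello world')
    while True:
        tok = lexer.advance()
        print(tok.type, tok.val)
        if tok.type == 'eos':
            break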
| 32.537911
| 147
| 0.471201
|
da3bc1afef32a0020bb900f54579368d6746d452
| 5,077
|
py
|
Python
|
linefood/hooks.py
|
ahmadRagheb/linefood
|
834f2ee6adbce5f454d627b32fab43a62329db18
|
[
"MIT"
] | 6
|
2017-08-21T23:09:29.000Z
|
2021-05-25T12:48:57.000Z
|
linefood/hooks.py
|
bharathjinka09/linefood
|
834f2ee6adbce5f454d627b32fab43a62329db18
|
[
"MIT"
] | 10
|
2018-03-09T16:14:49.000Z
|
2020-11-28T13:58:30.000Z
|
linefood/hooks.py
|
bharathjinka09/linefood
|
834f2ee6adbce5f454d627b32fab43a62329db18
|
[
"MIT"
] | 13
|
2017-12-21T10:04:01.000Z
|
2022-01-31T00:05:50.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "linefood"
app_title = "linefood"
app_publisher = "ahmad ragheb"
app_description = "linefood application "
app_icon = "octicon octicon-file-directory"
app_color = "red"
app_email = "ahmedragheb75@gmail.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/linefood/css/linefood.css"
# app_include_js = "/assets/linefood/js/linefood.js"
# include js, css files in header of web template
# "/assets/linefood/css/main-style.css",
web_include_css = [
"/assets/linefood/css/bootstrap.css",
"/assets/linefood/css/style.css",
"/assets/linefood/css/responsive.css",
"/assets/linefood/css/animate.css",
"/assets/linefood/css/colors/color1.css",
"/assets/linefood/css/owl.carousel.min.css",
]
# "/assets/linefood/js/main-scripts.js",
web_include_js = [
"/assets/linefood/js/jquery.min.js",
"/assets/linefood/js/tether.min.js",
"/assets/linefood/js/bootstrap.min.js",
"/assets/linefood/js/jquery.easing.js",
"/assets/linefood/js/parallax.js",
"/assets/linefood/js/jquery-waypoints.js",
"/assets/linefood/js/jquery-countTo.js",
"/assets/linefood/js/jquery.countdown.js",
"/assets/linefood/js/jquery.flexslider-min.js",
"/assets/linefood/js/images-loaded.js",
"/assets/linefood/js/jquery.isotope.min.js",
"/assets/linefood/js/magnific.popup.min.js",
"/assets/linefood/js/jquery.hoverdir.js",
"/assets/linefood/js/owl.carousel.min.js",
"/assets/linefood/js/equalize.min.js",
"/assets/linefood/js/gmap3.min.js",
"/assets/linefood/js/jquery-ui.js",
"/assets/linefood/js/jquery.cookie.js",
"/assets/linefood/js/main.js",
"/assets/linefood/rev-slider/js/jquery.themepunch.tools.min.js",
"/assets/linefood/rev-slider/js/jquery.themepunch.revolution.min.js",
"/assets/linefood/js/rev-slider.js",
"/assets/linefood/js/switcher.js",
"/assets/linefood/rev-slider/js/extensions/revolution.extension.actions.min.js",
"/assets/linefood/rev-slider/js/extensions/revolution.extension.carousel.min.js",
"/assets/linefood/rev-slider/js/extensions/revolution.extension.kenburn.min.js",
"/assets/linefood/rev-slider/js/extensions/revolution.extension.layeranimation.min.js",
"/assets/linefood/rev-slider/js/extensions/revolution.extension.migration.min.js",
"/assets/linefood/rev-slider/js/extensions/revolution.extension.navigation.min.js",
"/assets/linefood/rev-slider/js/extensions/revolution.extension.parallax.min.js",
"/assets/linefood/rev-slider/js/extensions/revolution.extension.slideanims.min.js",
"/assets/linefood/rev-slider/js/extensions/revolution.extension.video.min.js"
]
# include js in page
# page_js = {"page" : "public/js/file.js"}
# page_js = {"main" : "public/js/main-scripts.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
home_page = "line_food"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# role_home_page = {
# "Guest": "home"
# }
# Website user home page (by function)
# get_website_user_home_page = "linefood.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "linefood.install.before_install"
# after_install = "linefood.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "linefood.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "linefood.tasks.all"
# ],
# "daily": [
# "linefood.tasks.daily"
# ],
# "hourly": [
# "linefood.tasks.hourly"
# ],
# "weekly": [
# "linefood.tasks.weekly"
# ]
# "monthly": [
# "linefood.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "linefood.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "linefood.event.get_events"
# }
fixtures = [
{
"dt": "Custom Field",
"filters": [["name", "in",
["Blog Post-image"]]]
}
]
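# Note (illustrative): with the fixture above declared, running
# `bench --site <site-name> export-fixtures` exports the matching Custom Field
# records into this app so they are re-created when the app is migrated.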
| 28.683616
| 91
| 0.677565
|
3cfdcff8c91dafc0afc9a6996e8e030e3cba655e
| 216
|
py
|
Python
|
condastats/__init__.py
|
sophiamyang/condastats
|
f275d96d24e0cadf9963e88770c29ceeaaa80c05
|
[
"BSD-3-Clause"
] | 3
|
2020-01-17T22:17:22.000Z
|
2021-12-10T18:32:55.000Z
|
condastats/__init__.py
|
sophiamyang/condastats
|
f275d96d24e0cadf9963e88770c29ceeaaa80c05
|
[
"BSD-3-Clause"
] | 7
|
2021-03-17T13:45:44.000Z
|
2022-03-25T00:04:47.000Z
|
condastats/__init__.py
|
sophiamyang/condastats
|
f275d96d24e0cadf9963e88770c29ceeaaa80c05
|
[
"BSD-3-Clause"
] | 3
|
2019-11-03T20:28:31.000Z
|
2021-03-17T16:19:13.000Z
|
# -*- coding: utf-8 -*-
"""Top-level package for condastats."""
__author__ = """Sophia Man Yang"""
__version__ = '0.1.2'
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 19.636364
| 39
| 0.694444
|
192935158eea8fd1f3fe7e7393f209cfa7f44b89
| 255
|
py
|
Python
|
web/apis/moments/urls.py
|
KhomDrake/Remember-Backend
|
233f72ef3a54300e25946bdc3a1baae5b56d7a25
|
[
"MIT"
] | null | null | null |
web/apis/moments/urls.py
|
KhomDrake/Remember-Backend
|
233f72ef3a54300e25946bdc3a1baae5b56d7a25
|
[
"MIT"
] | null | null | null |
web/apis/moments/urls.py
|
KhomDrake/Remember-Backend
|
233f72ef3a54300e25946bdc3a1baae5b56d7a25
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from .views import MomentsView
from rest_framework.routers import SimpleRouter
router = SimpleRouter()
router.register('', MomentsView, basename='api.moments')
urlpatterns = [
path('', include(router.urls)),
]
| 21.25
| 56
| 0.756863
|
227b70ab0a4331f272563cd0cc376816a46cf874
| 691
|
py
|
Python
|
autos/urls.py
|
Shadow-AI/dj4e-samples
|
60f21f3c6bbb4bd96641ced82b18be109364452a
|
[
"MIT"
] | null | null | null |
autos/urls.py
|
Shadow-AI/dj4e-samples
|
60f21f3c6bbb4bd96641ced82b18be109364452a
|
[
"MIT"
] | null | null | null |
autos/urls.py
|
Shadow-AI/dj4e-samples
|
60f21f3c6bbb4bd96641ced82b18be109364452a
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
app_name = 'autos'
urlpatterns = [
path('', views.MainView.as_view(), name='all'),
path('main/create/', views.AutoCreate.as_view(), name='auto_create'),
path('main/<int:pk>/update/', views.AutoUpdate.as_view(), name="auto_update"),
path('main/<int:pk>/delete/', views.AutoDelete.as_view(), name="auto_delete"),
path('lookup/', views.MakeView.as_view(), name='make_list'),
path('lookup/create/', views.MakeCreate.as_view(), name='make_create'),
path('lookup/<int:pk>/update/', views.MakeUpdate.as_view(), name='make_update'),
path('lookup/<int:pk>/delete/', views.MakeDelete.as_view(), name='make_delete'),
]
| 43.1875
| 84
| 0.678726
|
6db7465b3be71272e821089d42dee9664498ea8f
| 6,281
|
py
|
Python
|
ml3d/datasets/scannet.py
|
eiiijiiiy/Open3D-ML
|
2a04231df0be39e2b8030e480d342cee5574fb9a
|
[
"MIT"
] | null | null | null |
ml3d/datasets/scannet.py
|
eiiijiiiy/Open3D-ML
|
2a04231df0be39e2b8030e480d342cee5574fb9a
|
[
"MIT"
] | null | null | null |
ml3d/datasets/scannet.py
|
eiiijiiiy/Open3D-ML
|
2a04231df0be39e2b8030e480d342cee5574fb9a
|
[
"MIT"
] | null | null | null |
import open3d as o3d
import numpy as np
import os, argparse, pickle, sys
from os.path import exists, join, isfile, dirname, abspath, split
from pathlib import Path
from glob import glob
import logging
import yaml
from .base_dataset import BaseDataset
from ..utils import Config, make_dir, DATASET
from .utils import BEVBox3D
logging.basicConfig(
level=logging.INFO,
format='%(levelname)s - %(asctime)s - %(module)s - %(message)s',
)
log = logging.getLogger(__name__)
class Scannet(BaseDataset):
"""
Scannet 3D dataset for Object Detection, used in visualizer, training, or test
"""
def __init__(self,
dataset_path,
name='Scannet',
cache_dir='./logs/cache',
use_cache=False,
**kwargs):
"""
Initialize
Args:
dataset_path (str): path to the dataset
kwargs:
"""
super().__init__(dataset_path=dataset_path,
name=name,
cache_dir=cache_dir,
use_cache=use_cache,
**kwargs)
cfg = self.cfg
self.name = cfg.name
self.dataset_path = cfg.dataset_path
self.num_classes = 18
self.classes = [
'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain',
'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',
'garbagebin'
]
self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}
self.label2cat = {self.cat2label[t]: t for t in self.cat2label}
self.cat_ids = np.array(
[3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.cat_ids2class = {
nyu40id: i for i, nyu40id in enumerate(list(self.cat_ids))
}
self.label_to_names = self.get_label_to_names()
available_scenes = []
files = os.listdir(dataset_path)
for f in files:
if 'scene' in f and f.endswith('.npy'):
available_scenes.append(f[:12])
available_scenes = list(set(available_scenes))
resource_path = Path(__file__).parent / '_resources' / 'scannet'
train_files = open(resource_path /
'scannetv2_train.txt').read().split('\n')[:-1]
val_files = open(resource_path /
'scannetv2_val.txt').read().split('\n')[:-1]
test_files = open(resource_path /
'scannetv2_test.txt').read().split('\n')[:-1]
self.train_scenes = []
self.val_scenes = []
self.test_scenes = []
for scene in available_scenes:
if scene in train_files:
self.train_scenes.append(join(self.dataset_path, scene))
elif scene in val_files:
self.val_scenes.append(join(self.dataset_path, scene))
elif scene in test_files:
self.test_scenes.append(join(self.dataset_path, scene))
self.semantic_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36,
39
]
def get_label_to_names(self):
return self.label2cat
@staticmethod
def read_lidar(path):
assert Path(path).exists()
data = np.load(path)
return data
def read_label(self, scene):
instance_mask = np.load(scene + '_ins_label.npy')
semantic_mask = np.load(scene + '_sem_label.npy')
bboxes = np.load(scene + '_bbox.npy')
## For filtering semantic labels to have same classes as object detection.
# for i in range(semantic_mask.shape[0]):
# semantic_mask[i] = self.cat_ids2class.get(semantic_mask[i], 0)
remapper = np.ones(150) * (-100)
for i, x in enumerate(self.semantic_ids):
remapper[x] = i
semantic_mask = remapper[semantic_mask]
objects = []
for box in bboxes:
name = self.label2cat[self.cat_ids2class[int(box[-1])]]
center = box[:3]
size = [box[3], box[5], box[4]] # w, h, l
yaw = 0.0
objects.append(Object3d(name, center, size, yaw))
return objects, semantic_mask, instance_mask
def get_split(self, split):
return ScannetSplit(self, split=split)
def get_split_list(self, split):
if split in ['train', 'training']:
return self.train_scenes
elif split in ['test', 'testing']:
return self.test_scenes
elif split in ['val', 'validation']:
return self.val_scenes
raise ValueError("Invalid split {}".format(split))
def is_tested(self):
pass
def save_test_result(self):
pass
class ScannetSplit():
def __init__(self, dataset, split='train'):
self.cfg = dataset.cfg
self.path_list = dataset.get_split_list(split)
log.info("Found {} pointclouds for {}".format(len(self.path_list),
split))
self.split = split
self.dataset = dataset
def __len__(self):
return len(self.path_list)
def get_data(self, idx):
scene = self.path_list[idx]
pc = self.dataset.read_lidar(scene + '_vert.npy')
feat = pc[:, 3:]
pc = pc[:, :3]
bboxes, semantic_mask, instance_mask = self.dataset.read_label(scene)
data = {
'point': pc,
'feat': feat,
'calib': None,
'bounding_boxes': bboxes,
'label': semantic_mask.astype(np.int32),
'instance': instance_mask.astype(np.int32)
}
return data
def get_attr(self, idx):
pc_path = self.path_list[idx]
name = Path(pc_path).name.split('.')[0]
attr = {'name': name, 'path': str(pc_path), 'split': self.split}
return attr
class Object3d(BEVBox3D):
"""
Stores object specific details like bbox coordinates.
"""
def __init__(self, name, center, size, yaw):
super().__init__(center, size, yaw, name, -1.0)
self.occlusion = 0.0
DATASET._register_module(Scannet)
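# Usage sketch (illustrative only): the dataset path is a placeholder and must
# already contain the preprocessed scene*_vert.npy, *_sem_label.npy,
# *_ins_label.npy and *_bbox.npy files.
def _scannet_example():
    dataset = Scannet(dataset_path='/path/to/scannet_processed')
    train_split = dataset.get_split('train')
    data = train_split.get_data(0)
    print(data['point'].shape, len(data['bounding_boxes']))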
| 29.767773
| 82
| 0.559943
|