Dataset column schema:

| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
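Each data row below pairs one source file (the content column) with repository metadata and three derived statistics. As a minimal, hedged sketch of how rows with this schema could be consumed, the Python snippet below loads a hypothetical local copy of the table with pandas, filters on two schema columns, and recomputes plausible versions of the derived columns from content; the file name rows.parquet and the exact formulas for avg_line_length and alphanum_fraction are assumptions, not taken from this dump.

import pandas as pd

def derived_stats(content):
    """Recompute plausible versions of the three derived columns from a file's text."""
    lines = content.splitlines() or [""]
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content) if content else 0.0,
    }

# hypothetical local export of this table; the real storage format is not stated in the dump
df = pd.read_parquet("rows.parquet")
# keep Python files from repositories with at least one star (max_stars_count is nullable)
py_files = df[(df["ext"] == "py") & (df["max_stars_count"].fillna(0) >= 1)]
for _, row in py_files.head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], derived_stats(row["content"]))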
e5d4e890453ea8e5fbfe2ebf50562df166f5e0a9
| 81
|
py
|
Python
|
SimPEG/utils/codeutils.py
|
jcapriot/simpeg
|
e88e653673c6b818592b6c075f76ee9215fe82b7
|
[
"MIT"
] | 1
|
2021-08-07T13:50:54.000Z
|
2021-08-07T13:50:54.000Z
|
SimPEG/utils/codeutils.py
|
jcapriot/simpeg
|
e88e653673c6b818592b6c075f76ee9215fe82b7
|
[
"MIT"
] | null | null | null |
SimPEG/utils/codeutils.py
|
jcapriot/simpeg
|
e88e653673c6b818592b6c075f76ee9215fe82b7
|
[
"MIT"
] | 1
|
2021-01-05T18:16:54.000Z
|
2021-01-05T18:16:54.000Z
|
from .code_utils import *
deprecate_module("codeutils", "code_utils", "0.15.0")
| 20.25
| 53
| 0.728395
|
b6aa8e7f83f7945189678e3be7661d376ffdb8e6
| 1,629
|
py
|
Python
|
python_project_template/__main__.py
|
bonginc/python-project-template
|
44a9fc82f682f8af3e2fb0a0d360845d0eb8e1c4
|
[
"Unlicense"
] | null | null | null |
python_project_template/__main__.py
|
bonginc/python-project-template
|
44a9fc82f682f8af3e2fb0a0d360845d0eb8e1c4
|
[
"Unlicense"
] | null | null | null |
python_project_template/__main__.py
|
bonginc/python-project-template
|
44a9fc82f682f8af3e2fb0a0d360845d0eb8e1c4
|
[
"Unlicense"
] | null | null | null |
import argparse # pragma: no cover
from . import BaseClass, base_function # pragma: no cover
def main() -> None: # pragma: no cover
"""
    The main function is executed by the commands
    `python -m python_project_template` and `$ python_project_template`.
This is your program's entry point.
You can change this function to do whatever you want.
Examples:
* Run a test suite
* Run a server
* Do some other stuff
* Run a command line application (Click, Typer, ArgParse)
* List all available tasks
* Run an application (Flask, FastAPI, Django, etc.)
"""
parser = argparse.ArgumentParser(
description="python_project_template.",
epilog="Enjoy the python_project_template functionality!",
)
    # This is a required positional argument
parser.add_argument(
"name",
type=str,
help="The username",
default="bonginc",
)
    # This is an optional named argument
parser.add_argument(
"-m",
"--message",
type=str,
help="The Message",
default="Hello",
required=False,
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Optionally adds verbosity",
)
args = parser.parse_args()
print(f"{args.message} {args.name}!")
if args.verbose:
print("Verbose mode is on.")
print("Executing main function")
base = BaseClass()
print(base.base_method())
print(base_function())
print("End of main function")
if __name__ == "__main__": # pragma: no cover
main()
| 26.274194
| 73
| 0.605893
|
e9f0f292ee2a55f5c2bf5289530cbd8afeb12c80
| 4,100
|
py
|
Python
|
synapse/util/httpresourcetree.py
|
TheJJ/synapse
|
1032393dfb0c865fc540539dfe649e7b1a32037a
|
[
"Apache-2.0"
] | null | null | null |
synapse/util/httpresourcetree.py
|
TheJJ/synapse
|
1032393dfb0c865fc540539dfe649e7b1a32037a
|
[
"Apache-2.0"
] | null | null | null |
synapse/util/httpresourcetree.py
|
TheJJ/synapse
|
1032393dfb0c865fc540539dfe649e7b1a32037a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.web.resource import NoResource
import logging
logger = logging.getLogger(__name__)
def create_resource_tree(desired_tree, root_resource):
"""Create the resource tree for this Home Server.
    This is unduly complicated because Twisted does not support putting
    child resources more than 1 level deep at a time.
    Args:
        desired_tree (dict): Dict from desired paths to the Resource
            objects to attach at those paths.
root_resource (twisted.web.resource.Resource): The root
resource to add the tree to.
Returns:
twisted.web.resource.Resource: the ``root_resource`` with a tree of
child resources added to it.
"""
# ideally we'd just use getChild and putChild but getChild doesn't work
# unless you give it a Request object IN ADDITION to the name :/ So
# instead, we'll store a copy of this mapping so we can actually add
# extra resources to existing nodes. See self._resource_id for the key.
resource_mappings = {}
for full_path, res in desired_tree.items():
# twisted requires all resources to be bytes
full_path = full_path.encode("utf-8")
logger.info("Attaching %s to path %s", res, full_path)
last_resource = root_resource
for path_seg in full_path.split(b'/')[1:-1]:
if path_seg not in last_resource.listNames():
# resource doesn't exist, so make a "dummy resource"
child_resource = NoResource()
last_resource.putChild(path_seg, child_resource)
res_id = _resource_id(last_resource, path_seg)
resource_mappings[res_id] = child_resource
last_resource = child_resource
else:
# we have an existing Resource, use that instead.
res_id = _resource_id(last_resource, path_seg)
last_resource = resource_mappings[res_id]
# ===========================
# now attach the actual desired resource
last_path_seg = full_path.split(b'/')[-1]
# if there is already a resource here, thieve its children and
# replace it
res_id = _resource_id(last_resource, last_path_seg)
if res_id in resource_mappings:
# there is a dummy resource at this path already, which needs
# to be replaced with the desired resource.
existing_dummy_resource = resource_mappings[res_id]
for child_name in existing_dummy_resource.listNames():
child_res_id = _resource_id(
existing_dummy_resource, child_name
)
child_resource = resource_mappings[child_res_id]
# steal the children
res.putChild(child_name, child_resource)
# finally, insert the desired resource in the right place
last_resource.putChild(last_path_seg, res)
res_id = _resource_id(last_resource, last_path_seg)
resource_mappings[res_id] = res
return root_resource
def _resource_id(resource, path_seg):
"""Construct an arbitrary resource ID so you can retrieve the mapping
later.
    If resource A has resource B attached as a child under path segment C,
    the mapping should look like _resource_id(A, C) = B.
    Args:
        resource (Resource): The *parent* Resource.
path_seg (str): The name of the child Resource to be attached.
Returns:
str: A unique string which can be a key to the child Resource.
"""
return "%s-%s" % (resource, path_seg)
| 40.196078
| 75
| 0.66561
|
fe8ae5a09415d01a11e72d657457cf51cdcf56e7
| 165
|
py
|
Python
|
itlo/admin.py
|
varundey/itlo
|
a7b0d46aa1ce2a0163ba0deda44ee2f9ebac5039
|
[
"MIT"
] | 1
|
2016-07-13T16:15:44.000Z
|
2016-07-13T16:15:44.000Z
|
itlo/admin.py
|
varundey/itlo
|
a7b0d46aa1ce2a0163ba0deda44ee2f9ebac5039
|
[
"MIT"
] | null | null | null |
itlo/admin.py
|
varundey/itlo
|
a7b0d46aa1ce2a0163ba0deda44ee2f9ebac5039
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import State
from .models import Keys
# Register your models here.
admin.site.register(State)
admin.site.register(Keys)
| 27.5
| 32
| 0.812121
|
2fc6374cc3d28df0445ba7399bf6f32d405422a2
| 444
|
py
|
Python
|
addons/mail/wizard/mail_blacklist_remove.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/mail/wizard/mail_blacklist_remove.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/mail/wizard/mail_blacklist_remove.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from odoo import fields, models
class MailBlacklistRemove(models.TransientModel):
_name = 'mail.blacklist.remove'
_description = 'Remove email from blacklist wizard'
email = fields.Char(name="Email", readonly=True, required=True)
reason = fields.Char(name="Reason")
def action_unblacklist_apply(self):
return self.env['mail.blacklist'].action_remove_with_reason(self.email, self.reason)
| 29.6
| 92
| 0.722973
|
dc455c5d60499b8042e72a0e0291dd6cf7a6aff5
| 273
|
py
|
Python
|
test.py
|
Plawn/Fancy_progressbar
|
cda9b59475c8ce2786a46a41d06b33b4cf217f7d
|
[
"Apache-2.0"
] | 2
|
2018-11-26T16:09:15.000Z
|
2021-01-09T14:25:35.000Z
|
test.py
|
Plawn/Fancy_progressbar
|
cda9b59475c8ce2786a46a41d06b33b4cf217f7d
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
Plawn/Fancy_progressbar
|
cda9b59475c8ce2786a46a41d06b33b4cf217f7d
|
[
"Apache-2.0"
] | null | null | null |
import Fancy_progressbar as fp
import random as rd
import time
def get_progress():
return 100 * rd.random()
b = fp.ProgressBar('test', 'kill_when_finished', 'animated')
b.use_progress(get_progress)
bh = fp.ProgressBarHandler([b])
bh.start()
time.sleep(3)
b.finish()
| 18.2
| 60
| 0.736264
|
65a20553c2d44a4a3c2c0c8a7e1f6b7bce2a45a8
| 5,626
|
py
|
Python
|
kaleidoscope.py
|
ES-Alexander/kaleidoscope
|
86d367333acffbe9bfd950f03bd83a0d2b69beab
|
[
"MIT"
] | 1
|
2021-02-25T04:59:17.000Z
|
2021-02-25T04:59:17.000Z
|
kaleidoscope.py
|
ES-Alexander/kaleidoscope
|
86d367333acffbe9bfd950f03bd83a0d2b69beab
|
[
"MIT"
] | null | null | null |
kaleidoscope.py
|
ES-Alexander/kaleidoscope
|
86d367333acffbe9bfd950f03bd83a0d2b69beab
|
[
"MIT"
] | 1
|
2022-01-02T17:12:32.000Z
|
2022-01-02T17:12:32.000Z
|
#!/usr/bin/env python3
import cv2
import numpy as np
def core(img, N, out, r_start, r_out, c_in, c_out, scale):
in_rows, in_cols = img.shape[:2]
if c_in is None:
c_in = (dim // 2 for dim in (in_rows, in_cols))
c_y, c_x = c_in
r_start %= 2 * np.pi
width = np.pi / N
r_end = r_start + width
if out == 'same':
out = np.empty((in_rows, in_cols, 3), dtype=np.uint8)
elif out == 'full':
quarter = np.pi / 2
r_mid = (r_start + r_end) / 2
if 0 <= r_mid < quarter:
dy = in_rows - c_y
dx = in_cols - c_x
elif quarter <= r_mid <= 2 * quarter:
dy = in_rows - c_y
dx = c_x
elif 2 * quarter <= r_mid <= 3 * quarter:
dy = c_y
dx = c_x
else:
dy = c_y
dx = in_cols - c_x
s = int(np.ceil(2 * np.sqrt(dx*dx + dy*dy) * scale))
out = np.empty((s, s, 3), dtype=np.uint8)
else:
out = np.empty((out, out, 3), dtype=np.uint8)
out_rows, out_cols = out.shape[:2]
if c_out is None:
c_out = (dim // 2 for dim in (out_rows, out_cols))
co_y, co_x = c_out
# create sample points and offset to center of output image
Xp, Yp = np.meshgrid(np.arange(out_cols), np.arange(out_rows))
Xp -= co_x
Yp -= co_y
# calculate magnitude and angle of each sample point in input image
mag_p = np.sqrt(Xp*Xp + Yp*Yp) / scale
theta_p = np.abs(((np.arctan2(Xp, Yp) - r_out) % (2 * width)) - width) \
+ r_start
# convert to cartesian sample points in input image, offset by c_in
Y = (mag_p * np.sin(theta_p) + c_y).astype(np.int64)
X = (mag_p * np.cos(theta_p) + c_x).astype(np.int64)
# set outside valid region pixels to black (avoid index error)
# temporarily use pixel [0,0] of input image
old = img[0,0].copy()
img[0,0] = (0, 0, 0)
bad = (Y < 0) | (Y >= in_rows) | (X < 0) | (X >= in_cols)
Y[bad] = 0
X[bad] = 0
# sample input image to set each pixel of out
out[:] = img[Y, X]
img[0,0] = old # restore input [0,0] to its initial value
return out, c_x, c_y, r_start, r_end
def add_annotation(img, c_x, c_y, r_start, r_end):
in_rows, in_cols = img.shape[:2]
# draw a circle at the input c_in
cv2.circle(img, (c_x, c_y), 10, (0,0,255), 2)
# draw lines from c_in to display sample region in input image
l = min(max(c_x, in_cols-c_x), max(c_y, in_rows-c_y)) / 3
cv2.line(img, (c_x, c_y), (int(c_x + l*np.cos(r_start)),
int(c_y + l*np.sin(r_start))),
(255,0,0), 2)
cv2.line(img, (c_x, c_y), (int(c_x + l * np.cos(r_end)),
int(c_y + l * np.sin(r_end))),
(0,255,0), 2)
def kaleido(img, N=10, out='same', r_start=0, r_out=0, c_in=None, c_out=None,
scale=1, annotate=False):
''' Return a kaleidoscope from img, with specified parameters.
'img' is a 3-channel uint8 numpy array of image pixels.
'N' is the number of mirrors.
'out' can be 'same', 'full', or a 3-channel uint8 array to fill.
'r_start' is the selection rotation from the input image [clock radians].
'r_out' is the rotation of the output image result [clock radians].
'c_in' is the origin point of the sample sector from the input image.
If None defaults to the center of the input image [c_y,c_x].
'c_out' is the center of the kaleidoscope in the output image. If None
defaults to the center point of the output image [c_y, c_x].
'scale' is the scale of the output kaleidoscope. Default 1.
'annotate' is a boolean denoting whether to annotate the input image to
        display the selected region. Default False.
'''
out, c_x, c_y, r_start, r_end = core(img, N, out, r_start, r_out,
c_in, c_out, scale)
if annotate:
add_annotation(img, c_x, c_y, r_start, r_end)
return out
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-f', '--filename', default='test.png',
help='path to image file')
parser.add_argument('-n', type=int, default=15, help='number of mirrors')
parser.add_argument('-o', '--out', default='full',
choices=('same', 'full'),
help='output size '
'(same as input or full kaleidoscope)')
parser.add_argument('--r_start', type=float, default=0.6,
help='clockwise radians rotation of input image')
parser.add_argument('--r_out', type=float, default=0,
help='clockwise radians rotation of output image')
parser.add_argument('--c_in', nargs=2, type=int,
help='c_y c_x - origin point of the sample sector from'
' the input image')
parser.add_argument('--c_out', nargs=2, type=int,
help='c_y c_x - center point of the kaleidoscope in '
'the output image')
parser.add_argument('-s', '--scale', type=float, default=1,
help='scale of the output kaleidoscope')
parser.add_argument('-a', '--annotate', action='store_true')
args = parser.parse_args()
image = cv2.imread(args.filename)
out = kaleido(image, args.n, args.out, args.r_start, args.r_out, args.c_in,
args.c_out, args.scale, args.annotate)
cv2.imshow('in', image)
cv2.imshow('out', out)
cv2.waitKey()
cv2.destroyAllWindows()
| 38.013514
| 79
| 0.572343
|
354485b6623b608a5ed68237e299682d7e9dcffe
| 12,233
|
py
|
Python
|
examples/demo_all.py
|
hawthorne-stephen-kelley/python-nvd3
|
fd5d82481500dc72a45e254b74977edcfdacf845
|
[
"MIT"
] | 1
|
2019-10-03T23:29:06.000Z
|
2019-10-03T23:29:06.000Z
|
examples/demo_all.py
|
hawthorne-stephen-kelley/python-nvd3
|
fd5d82481500dc72a45e254b74977edcfdacf845
|
[
"MIT"
] | null | null | null |
examples/demo_all.py
|
hawthorne-stephen-kelley/python-nvd3
|
fd5d82481500dc72a45e254b74977edcfdacf845
|
[
"MIT"
] | 2
|
2016-12-29T04:28:38.000Z
|
2019-12-03T02:02:11.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Examples for Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from nvd3 import cumulativeLineChart
from nvd3 import discreteBarChart
from nvd3 import lineChart
from nvd3 import linePlusBarChart
from nvd3 import lineWithFocusChart
from nvd3 import multiBarChart
from nvd3 import multiBarHorizontalChart
from nvd3 import stackedAreaChart
from nvd3 import scatterChart
from nvd3 import pieChart
import random
import datetime
import time
start_time = int(time.mktime(datetime.datetime(2012, 6, 1).timetuple()) * 1000)
nb_element = 100
# Open File for test
output_file = open('test_demo_all.html', 'w')
# ---------------------------------------
html_open = """
<!DOCTYPE html>
<html lang="en">
<head>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<link href="https://cdnjs.cloudflare.com/ajax/libs/nvd3/1.7.1/nv.d3.min.css" rel="stylesheet" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/nvd3/1.7.1/nv.d3.min.js"></script>
</head>
<body>
"""
output_file.write(html_open)
type = "discreteBarChart"
chart = discreteBarChart(name='my graphname', height=400, width=800, jquery_on_ready=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
xdata = ["A", "B", "C", "D", "E", "F", "G"]
ydata = [3, 12, -10, 5, 25, -7, 2]
extra_serie = {"tooltip": {"y_start": "", "y_end": " cal"}}
chart.add_serie(y=ydata, x=xdata, extra=extra_serie)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
type = "pie Chart"
chart = pieChart(name=type, color_category='category20c', height=400,
width=400, jquery_on_ready=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
color_list = ['orange', 'yellow', '#C5E946', '#95b43f', 'red', '#FF2259', '#F6A641']
extra_serie = {"tooltip": {"y_start": "", "y_end": " cal"}, "color_list": color_list}
xdata = ["Orange", "Banana", "Pear", "Kiwi", "Apple", "Strawberry", "Pineapple"]
ydata = [3, 4, 2, 1, 5, 7, 3]
chart.add_serie(y=ydata, x=xdata, extra=extra_serie)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
name = "lineChart-different-x-axis"
type = "lineChart"
chart = lineChart(name=name, height=400, width=800, x_is_date=False,
jquery_on_ready=True)
chart.set_containerheader("\n\n<h2>" + name + "</h2>\n\n")
xdata = [1 + x * 2 for x in list(range(nb_element))]
xdata10 = [0 + x * 2 for x in list(range(nb_element))]
ydata = [i + random.randint(1, 10) for i in range(nb_element)]
ydata2 = [x * 2 for x in ydata]
#Configure a color for a specific serie
kwargs1 = {'color': 'green'}
kwargs2 = {'color': 'red'}
extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " odd"}}
chart.add_serie(name="Odd X-Axis", y=ydata, x=xdata, extra=extra_serie, **kwargs1)
extra_serie = {"tooltip": {"y_start": "", "y_end": " even"}}
chart.add_serie(name="Even X-Axis", y=ydata2, x=xdata10, extra=extra_serie, **kwargs2)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
type = "lineChart"
chart = lineChart(height=400, width=800, x_is_date=True, x_axis_format="%d %b %Y %H",
jquery_on_ready=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
xdata = list(range(nb_element))
xdata = [start_time + x * 1000000000 for x in xdata]
ydata = [i + random.randint(1, 10) for i in range(nb_element)]
ydata2 = [x * 2 for x in ydata]
# Configure a color for a specific serie
kwargs1 = {'color': 'green'}
kwargs2 = {'color': 'red'}
extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " calls"},
"date_format": "%d %b %Y %I:%M:%S %p"}
chart.add_serie(name="Count", y=ydata, x=xdata, extra=extra_serie, **kwargs1)
extra_serie = {"tooltip": {"y_start": "", "y_end": " min"}}
chart.add_serie(name="Duration", y=ydata2, x=xdata, extra=extra_serie, **kwargs2)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
type = "lineChartWithInteractiveGuideline"
chart = lineChart(name="lineChart-With-Interactive-Guideline",
height=400, width=800, x_is_date=True, x_axis_format="%d %b %Y %H",
jquery_on_ready=True, use_interactive_guideline=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
xdata = list(range(nb_element))
xdata = [start_time + x * 1000000000 for x in xdata]
ydata = [i + random.randint(1, 10) for i in range(nb_element)]
ydata2 = [x * 2 for x in ydata]
# Configure a color for a specific serie
kwargs1 = {'color': 'green'}
kwargs2 = {'color': 'red'}
extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " calls"},
"date_format": "%d %b %Y %I:%M:%S %p"}
chart.add_serie(name="Count", y=ydata, x=xdata, extra=extra_serie, **kwargs1)
extra_serie = {"tooltip": {"y_start": "", "y_end": " min"}}
chart.add_serie(name="Duration", y=ydata2, x=xdata, extra=extra_serie, **kwargs2)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
type = "lineWithFocusChart"
chart = lineWithFocusChart(color_category='category20b', x_is_date=True,
height=400, width=800,
x_axis_format="%d %b %Y", jquery_on_ready=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
xdata = list(range(nb_element))
xdata = [start_time + x * 1000000000 for x in xdata]
ydata = [i + random.randint(-10, 10) for i in list(range(nb_element))]
ydata2 = [x * 2 for x in ydata]
ydata3 = [x * 3 for x in ydata]
ydata4 = [x * 4 for x in ydata]
extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " calls"},
"date_format": "%d %b %Y %I:%M:%S"}
# extra_serie = None
chart.add_serie(name="serie 1", y=ydata, x=xdata, extra=extra_serie)
chart.add_serie(name="serie 2", y=ydata2, x=xdata, extra=extra_serie)
chart.add_serie(name="serie 3", y=ydata3, x=xdata, extra=extra_serie)
chart.add_serie(name="serie 4", y=ydata4, x=xdata, extra=extra_serie)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
type = "stackedAreaChart"
chart = stackedAreaChart(height=400, width=800, x_is_date=True,
x_axis_format="%d %b %Y %I", jquery_on_ready=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
xdata = list(range(nb_element))
xdata = [start_time + x * 1000000000 for x in xdata]
ydata = [i + random.randint(1, 10) for i in list(range(nb_element))]
ydata2 = [x * 2 for x in ydata]
extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " calls"},
"date_format": "%d %b %Y %I:%M:%S %p"}
chart.add_serie(name="serie 1", y=ydata, x=xdata, extra=extra_serie)
chart.add_serie(name="serie 2", y=ydata2, x=xdata, extra=extra_serie)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
type = "linePlusBarChart"
chart = linePlusBarChart(height=400, width=800, x_is_date=True,
x_axis_format="%d %b %Y", jquery_on_ready=True,
focus_enable=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
xdata = list(range(nb_element))
xdata = [start_time + x * 1000000000 for x in xdata]
ydata = [i + random.randint(1, 10) for i in range(nb_element)]
ydata2 = [i + random.randint(1, 10) for i in reversed(list(range(nb_element)))]
kwargs = {}
kwargs['bar'] = True
extra_serie = {"tooltip": {"y_start": "$ ", "y_end": ""}}
chart.add_serie(name="Count", y=ydata, x=xdata, extra=extra_serie, **kwargs)
extra_serie = {"tooltip": {"y_start": "", "y_end": " min"}}
chart.add_serie(name="Duration", y=ydata2, x=xdata, extra=extra_serie)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
type = "cumulativeLineChart"
chart = cumulativeLineChart(height=400, width=800,
x_is_date=True, x_axis_format="%d %b %Y",
jquery_on_ready=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
xdata = list(range(nb_element))
xdata = [start_time + x * 1000000000 for x in xdata]
ydata = [i + random.randint(1, 10) for i in range(nb_element)]
ydata2 = [x * 2 for x in ydata]
extra_serie = {"tooltip": {"y_start": "", "y_end": " Calls"}}
chart.add_serie(name="Count", y=ydata, x=xdata, extra=extra_serie)
extra_serie = {"tooltip": {"y_start": "", "y_end": " Min"}}
chart.add_serie(name="Duration", y=ydata2, x=xdata, extra=extra_serie)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
type = "multiBarHorizontalChart"
chart = multiBarHorizontalChart(height=400, width=800, jquery_on_ready=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
nb_element = 10
xdata = list(range(nb_element))
ydata = [random.randint(-10, 10) for i in range(nb_element)]
ydata2 = [x * 2 for x in ydata]
extra_serie = {"tooltip": {"y_start": "", "y_end": " Calls"}}
chart.add_serie(name="Count", y=ydata, x=xdata, extra=extra_serie)
extra_serie = {"tooltip": {"y_start": "", "y_end": " Min"}}
chart.add_serie(name="Duration", y=ydata2, x=xdata, extra=extra_serie)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
type = "multiBarChart"
chart = multiBarChart(height=400, width=800, jquery_on_ready=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
nb_element = 10
xdata = list(range(nb_element))
ydata = [random.randint(1, 10) for i in range(nb_element)]
ydata2 = [x * 2 for x in ydata]
extra_serie = {"tooltip": {"y_start": "", "y_end": " call"}}
chart.add_serie(name="Count", y=ydata, x=xdata, extra=extra_serie)
extra_serie = {"tooltip": {"y_start": "", "y_end": " min"}}
chart.add_serie(name="Duration", y=ydata2, x=xdata, extra=extra_serie)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
type = "multiBarChartDate"
chart = multiBarChart(name=type, height=400, width=800, x_is_date=True, jquery_on_ready=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
nb_element = 100
start_time = int(time.mktime(datetime.datetime(2012, 6, 1).timetuple()) * 1000)
xdata = list(range(nb_element))
xdata = [start_time + x * 1000000000 for x in xdata]
ydata = [i + random.randint(1, 10) for i in range(nb_element)]
ydata2 = [x * 2 for x in ydata]
tooltip_date = "%d %b %Y %H:%M:%S %p"
extra_serie = {"tooltip": {"y_start": "There are ", "y_end": " calls"},
"date_format": tooltip_date}
chart.add_serie(name="Count", y=ydata, x=xdata, extra=extra_serie)
extra_serie = {"tooltip": {"y_start": "There are ", "y_end": " duration"},
"date_format": tooltip_date}
chart.add_serie(name="Duration", y=ydata2, x=xdata, extra=extra_serie)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
type = "scatterChart"
chart = scatterChart(name=type, height=350, width=800, x_is_date=False)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
nb_element = 50
xdata = [i + random.randint(1, 10) for i in range(nb_element)]
ydata = [i * random.randint(1, 10) for i in range(nb_element)]
ydata2 = [x * 2 for x in ydata]
ydata3 = [x * 5 for x in ydata]
kwargs1 = {'shape': 'circle', 'size': '1'}
kwargs2 = {'shape': 'cross', 'size': '10'}
kwargs3 = {'shape': 'triangle-up', 'size': '100'}
extra_serie = {"tooltip": {"y_start": "", "y_end": " calls"}}
chart.add_serie(name="serie 1", y=ydata, x=xdata, extra=extra_serie, **kwargs1)
chart.add_serie(name="serie 2", y=ydata2, x=xdata, extra=extra_serie, **kwargs2)
chart.add_serie(name="serie 3", y=ydata3, x=xdata, extra=extra_serie, **kwargs3)
chart.buildcontent()
output_file.write(chart.htmlcontent)
# ---------------------------------------
html_close = """</body></html>"""
output_file.write(html_close)
# close Html file
output_file.close()
| 37.182371
| 100
| 0.643015
|
5b52804433b33de7b899971af7c7e8683d2daafc
| 1,102
|
py
|
Python
|
2020/src/day2.py
|
Sujatha-Nagarajan/AdventOfCode
|
afce23c74fd0a72caa29c1604a582b21806e794e
|
[
"CC0-1.0"
] | 1
|
2020-12-05T06:14:37.000Z
|
2020-12-05T06:14:37.000Z
|
2020/src/day2.py
|
Sujatha-Nagarajan/AdventOfCode
|
afce23c74fd0a72caa29c1604a582b21806e794e
|
[
"CC0-1.0"
] | null | null | null |
2020/src/day2.py
|
Sujatha-Nagarajan/AdventOfCode
|
afce23c74fd0a72caa29c1604a582b21806e794e
|
[
"CC0-1.0"
] | null | null | null |
final_valid_count_a = 0
final_valid_count_b = 0
with open("../input/day2.txt","r") as file:
entries = file.readlines()
for e in entries:
items = e.split(' ')
min_value,max_value = items[0].split('-')
letter = items[1][0]
password = items[2]
# first puzzle
# the letter should be present between min and max times in the password string
count=sum(p==letter for p in password)
if count >= int(min_value) and count <= int(max_value):
final_valid_count_a += 1
# second puzzle
# the letter should be present either in pos1 of the password or pos2 of the password
# with index starting from 1
pos1,pos2 = items[0].split('-')
pos1 = int(pos1)
pos2 = int(pos2)
if password[pos1-1] == letter:
if password[pos2-1] != letter:
final_valid_count_b += 1
elif password[pos2-1] == letter:
final_valid_count_b += 1
print("a)", final_valid_count_a)
print("b)", final_valid_count_b)
| 28.25641
| 93
| 0.569873
|
a0925733a5321e428b8a5a06fcfcb13b2cfe2edb
| 1,078
|
py
|
Python
|
mglib/wrapper.py
|
mohammed-Emad/mglib
|
22f5fd6d4a584b01d595c9cc6fffb8fe6b01597c
|
[
"Apache-2.0"
] | 45
|
2021-01-10T14:33:52.000Z
|
2022-03-27T14:02:56.000Z
|
mglib/wrapper.py
|
mohammed-Emad/mglib
|
22f5fd6d4a584b01d595c9cc6fffb8fe6b01597c
|
[
"Apache-2.0"
] | 8
|
2021-01-08T20:02:21.000Z
|
2022-02-01T13:12:24.000Z
|
mglib/wrapper.py
|
mohammed-Emad/mglib
|
22f5fd6d4a584b01d595c9cc6fffb8fe6b01597c
|
[
"Apache-2.0"
] | 12
|
2021-01-07T20:03:07.000Z
|
2022-03-24T04:07:51.000Z
|
import logging
import subprocess
logger = logging.getLogger(__name__)
class Wrapper:
def __init__(self, exec_name, check=True, dry_run=False):
self.exec_name = exec_name
        self.check = check
        # useful for debugging purposes
self.dry_run = dry_run
def get_cmd(self):
cmd = []
if self.exec_name:
cmd.extend([self.exec_name])
return cmd
def call_no_args(self):
cmd = self.get_cmd()
self.run(cmd)
def run(self, cmd):
command_to_run = ' '.join(cmd)
if (self.dry_run):
logger.debug(f"Dry run: {command_to_run}")
logger.debug(f"subprocess: {command_to_run}")
ret = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8"
)
if ret.returncode != 0:
logger.error((
f"returncode={ret.returncode}"
f" stdout={ret.stdout}"
f" stderr={ret.stderr}"
))
return ret
| 21.56
| 61
| 0.538033
|
5e30e4868679bf252840bd67adf964e277a7aa1a
| 3,143
|
py
|
Python
|
process-liuk-entities.py
|
timofeic/life-uk-test
|
9ae14d465eb44d8dc061d15772e569dc9f99197b
|
[
"MIT"
] | null | null | null |
process-liuk-entities.py
|
timofeic/life-uk-test
|
9ae14d465eb44d8dc061d15772e569dc9f99197b
|
[
"MIT"
] | null | null | null |
process-liuk-entities.py
|
timofeic/life-uk-test
|
9ae14d465eb44d8dc061d15772e569dc9f99197b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
import sys
import csv
from ast import literal_eval
def checkScore(value):
if 0 <= value <= 1:
return True
sys.exit("Invalid score, please specify a number between 0 and 1 e.g. 0.855")
# Given a list of words, return a dictionary of
# word-frequency pairs.
# Source: https://programminghistorian.org/en/lessons/counting-frequencies
def wordListToFreqDict(wordlist):
wordfreq = [wordlist.count(p) for p in wordlist]
return dict(zip(wordlist,wordfreq))
# Sort a dictionary of word-frequency pairs in
# order of descending frequency.
# Source: https://programminghistorian.org/en/lessons/counting-frequencies
def sortFreqDict(freqdict):
aux = [(freqdict[key], key) for key in freqdict]
aux.sort()
aux.reverse()
return aux
def main():
common_words = ['UK', 'UKCitizenshipSupport.com', 'UKCitizenshipsupport.com', 'Page',
'Chapter 1', 'Chapter 2', 'Chapter 3', 'Chapter 4', 'Chapter 5',
'United Kingdom', 'British', 'HMSO', 'www.parliament.uk']
if len(sys.argv) == 3:
filename = sys.argv[1]
min_score = float(sys.argv[2])
checkScore(min_score)
textlist = []
print('Opening %s' % filename)
lineList = [line.rstrip('\n') for line in open(filename, "r")]
print(len(lineList))
# Remove last list item, since it's an errorcode from Comprehend.
lineList.pop(-1)
with open('liuk-entities.csv', 'w+') as csvfile:
writer = csv.writer(csvfile)
#Header row
writer.writerow(['Text','Type','Score','File','Line Number'])
for line in lineList:
try:
line_dict = literal_eval(line)
f = line_dict["File"]
line_number = line_dict["Line"]
for entity in line_dict["Entities"]:
score = entity["Score"]
if float(score) <= min_score:
break
text = entity["Text"]
if text in common_words:
break
# Create a list of texts, so that we can count the frequency
textlist.append(text)
t = entity["Type"]
writer.writerow([text,t,score,f,line_number])
except KeyError:
continue
csvfile.close()
print("Writing entities to file.")
dictionary = wordListToFreqDict(textlist)
sorteddict = sortFreqDict(dictionary)
with open('liuk-entities-freqpairs.csv', 'w+') as csvfile2:
writer = csv.writer(csvfile2)
#Header row
writer.writerow(['Frequency', 'Text'])
for row in sorteddict:
writer.writerow(row)
csvfile2.close()
else:
sys.exit("Invalid number of arguments, please run with 'python process-liuk-entities.py <filename> <score>' \
where score is a floating point number between 0 and 1 e.g. 0.855")
if __name__ == '__main__':
main()
| 37.416667
| 117
| 0.571429
|
1658cb38883954c28c8beeb9d862867018aac0bd
| 1,052
|
py
|
Python
|
examples/response/alert_bulk_resolve.py
|
rathnadeep/cbapi-python
|
55375d8796a9d88d00bd16df13d71a2d8d76dd9c
|
[
"MIT"
] | 3
|
2019-01-23T19:11:33.000Z
|
2022-02-25T02:06:51.000Z
|
examples/response/alert_bulk_resolve.py
|
rathnadeep/cbapi-python
|
55375d8796a9d88d00bd16df13d71a2d8d76dd9c
|
[
"MIT"
] | 1
|
2021-03-31T19:51:07.000Z
|
2021-03-31T19:51:07.000Z
|
examples/response/alert_bulk_resolve.py
|
jjfallete/cbapi-python
|
8b92584edca5605cd94afa7656fd04c282b875b8
|
[
"MIT"
] | 1
|
2022-02-25T02:06:52.000Z
|
2022-02-25T02:06:52.000Z
|
#!/usr/bin/env python
import sys
from cbapi.response.models import Alert
from cbapi.example_helpers import build_cli_parser, get_cb_response_object
import time
def main():
parser = build_cli_parser("Bulk resolve alerts")
parser.add_argument("--query", action="store", default="", required=True,
help="The query string of alerts to resolve. All matching alerts will be resolved.")
args = parser.parse_args()
cb = get_cb_response_object(args)
alert_query = cb.select(Alert).where("-status:Resolved")
alert_query = alert_query.where(args.query)
alert_count = len(alert_query)
if alert_count > 0:
print("Resolving {0:d} alerts...".format(len(alert_query)))
alert_query.change_status("Resolved")
print("Waiting for alert changes to take effect...")
time.sleep(25)
print("Complete. Resolved {0:d} alerts.".format(alert_count))
else:
print("Congratulations! You have no unresolved alerts!")
if __name__ == "__main__":
sys.exit(main())
| 28.432432
| 108
| 0.679658
|
447e21655a162991588f07bc7e14884a224f23ed
| 2,320
|
py
|
Python
|
appEngine-DataStore/labs/fortune-teller/start/main.py
|
Federickg23/cssi-labs
|
b5ec2c116c0d075468e8381c6656669f22a60ab3
|
[
"Apache-2.0"
] | null | null | null |
appEngine-DataStore/labs/fortune-teller/start/main.py
|
Federickg23/cssi-labs
|
b5ec2c116c0d075468e8381c6656669f22a60ab3
|
[
"Apache-2.0"
] | null | null | null |
appEngine-DataStore/labs/fortune-teller/start/main.py
|
Federickg23/cssi-labs
|
b5ec2c116c0d075468e8381c6656669f22a60ab3
|
[
"Apache-2.0"
] | 1
|
2018-07-21T00:09:06.000Z
|
2018-07-21T00:09:06.000Z
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import os
import random
def get_fortune():
#add a list of fortunes to the empty fortune_list array
fortune_list=['fortune1', 'fortune2']
#use the random library to return a random element from the array
    random_fortune = random.choice(fortune_list)
return(random_fortune)
#remember, you can get this by searching for jinja2 google app engine
jinja_current_directory = "insert jinja2 environment variable here"
class FortuneHandler(webapp2.RequestHandler):
def get(self):
# In part 2, instead of returning this string,
# make a function call that returns a random fortune.
self.response.write('a response from the FortuneHandler')
#add a post method
#def post(self):
class HelloHandler(webapp2.RequestHandler):
def get(self):
self.response.write('Hello World. Welcome to the root route of my app')
#the route mapping
app = webapp2.WSGIApplication([
#this line routes the main url ('/') - also know as
#the root route - to the Fortune Handler
('/', HelloHandler),
('/predict', FortuneHandler) #maps '/predict' to the FortuneHandler
], debug=True)
| 34.117647
| 79
| 0.734052
|
862e7b5bc59d3baa59e973e204700047da0f4260
| 7,330
|
py
|
Python
|
python/tvm/topi/cuda/dense.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 1
|
2022-03-06T04:01:26.000Z
|
2022-03-06T04:01:26.000Z
|
python/tvm/topi/cuda/dense.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 6
|
2022-03-23T14:15:51.000Z
|
2022-03-31T14:35:57.000Z
|
python/tvm/topi/cuda/dense.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Schedule for dense operator"""
import logging
import tvm
from tvm import te
import tvm.autotvm as autotvm
from tvm.contrib import cublas
from .tensor_intrin import dp4a
from .. import tag
from .. import generic
from ..utils import traverse_inline, get_const_tuple
logger = logging.getLogger("topi")
def _matmul_cublas_common(
cfg,
tensor_a,
tensor_b,
bias=None,
out_dtype=None,
transpose_a=False,
transpose_b=False,
):
assert len(tensor_a.shape) == 2 and len(tensor_b.shape) == 2, "only support 2-dim matmul"
if bias is not None:
assert len(bias.shape) == 1
if out_dtype is None:
out_dtype = tensor_a.dtype
if out_dtype not in [tensor_a.dtype, "int32"]:
assert out_dtype == tensor_a.dtype, "Mixed precision other than int8 + int32 not supported."
batch, in_dim = get_const_tuple(tensor_a.shape)
out_dim, _ = get_const_tuple(tensor_b.shape)
matmul = cublas.matmul(tensor_a, tensor_b, transpose_a, transpose_b, dtype=out_dtype)
if all(isinstance(d, int) for d in [batch, in_dim, out_dim]):
cfg.add_flop(batch * in_dim * out_dim * 2)
if bias is not None:
matmul = te.compute(
(batch, out_dim), lambda i, j: matmul[i, j] + bias[j], tag=tag.BROADCAST
)
return matmul
@autotvm.register_topi_compute("matmul_cublas.cuda")
def matmul_cublas(
cfg,
tensor_a,
tensor_b,
bias=None,
out_dtype=None,
transpose_a=False,
transpose_b=False,
):
"""Matmul operator on CUDA with CUBLAS"""
return _matmul_cublas_common(cfg, tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b)
@autotvm.register_topi_schedule("matmul_cublas.cuda")
def schedule_matmul_cublas(_, outs):
"""Schedule matmul operator using CUBLAS"""
return generic.schedule_extern(outs)
@autotvm.register_topi_compute("dense_cublas.cuda")
def dense_cublas(cfg, data, weight, bias=None, out_dtype=None):
"""Dense operator on CUDA with CUBLAS. This is an alias of matmul_nt operator."""
return _matmul_cublas_common(cfg, data, weight, bias, out_dtype, False, True)
@autotvm.register_topi_schedule("dense_cublas.cuda")
def schedule_dense_cublas(_, outs):
"""Schedule dense operator using CUBLAS"""
return generic.schedule_extern(outs)
@autotvm.register_topi_compute("dense_int8.cuda")
def dense_int8(cfg, data, weight, bias=None, out_dtype=None):
"""Dense operator for int8 on CUDA"""
if out_dtype is None:
out_dtype = data.dtype
batch, in_dim = get_const_tuple(data.shape)
out_dim, _ = get_const_tuple(weight.shape)
k = te.reduce_axis((0, in_dim), name="k")
matmul = te.compute(
(batch, out_dim),
lambda i, j: te.sum(
data[i, k].astype(out_dtype) * weight[j, k].astype(out_dtype), axis=[k]
),
tag="dense_int8",
)
cfg.add_flop(batch * in_dim * out_dim * 2)
if bias is not None:
matmul = te.compute(
(batch, out_dim),
lambda i, j: matmul[i, j] + bias[j].astype(out_dtype),
tag=tag.BROADCAST,
)
cfg.add_flop(batch * out_dim)
return matmul
@autotvm.register_topi_schedule("dense_int8.cuda")
def schedule_dense_int8(cfg, outs):
"""Dense schedule for int8 on CUDA"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "dense_int8" in op.tag:
_schedule_dense_int8(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_dense_int8(cfg, s, output):
data, weight = s[output].op.input_tensors
if len(weight.op.input_tensors) == 1 and weight.op.input_tensors[0] == data:
s[weight].compute_inline()
batch, in_dim = get_const_tuple(data.shape)
out_dim, _ = get_const_tuple(weight.shape)
in_dim_factor = 4
    assert in_dim % in_dim_factor == 0, "Input dimension must be divisible by {}".format(in_dim_factor)
if in_dim % 16 == 0:
in_dim_factor = 16
# create tuning space
cfg.define_split("tile_y", batch, num_outputs=4)
cfg.define_split("tile_x", out_dim, num_outputs=4)
cfg.define_split("tile_k", in_dim // in_dim_factor, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
# create cache stage
AA = s.cache_read(data, "shared", [output])
WW = s.cache_read(weight, "shared", [output])
CC = s.cache_write(output, "local")
# handle bias
if output.op not in s.outputs:
s[output].compute_inline()
output = s.outputs[0].output(0)
n, x = s[output].op.axis
# this is the scope to attach global config inside this kernel
kernel_scope, n = s[output].split(n, nparts=1)
ko = CC.op.reduce_axis[0]
ko, ki = s[CC].split(ko, factor=4)
ko, kt = cfg["tile_k"].apply(s, CC, ko)
target = tvm.target.Target.current(allow_none=False)
do_tensorize = True
if "vulkan" in target.keys:
do_tensorize = "+dotprod" in target.mattr or target.supports_integer_dot_product
if do_tensorize:
dtypes = (data.dtype, weight.dtype)
s[CC].tensorize(ki, dp4a("shared", "shared", "local", dtypes))
by, vy, ty, yi = cfg["tile_y"].apply(s, output, n)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
s[output].reorder(by, bx, vy, vx, ty, tx, yi, xi)
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
n_ty = cfg["tile_y"].size[2]
n_tx = cfg["tile_x"].size[2]
s[CC].compute_at(s[output], tx)
yo, xo = CC.op.axis[:2]
s[CC].reorder(ko, kt, yo, xo, ki)
for load in [AA, WW]:
s[load].compute_at(s[CC], ko)
outer, inner = s[load].split(s[load].op.axis[-1], factor=in_dim_factor)
s[load].vectorize(inner)
fused = s[load].op.axis[:-1] + [outer]
fused = s[load].fuse(*fused)
fused, tx = s[load].split(fused, factor=n_tx)
fused, ty = s[load].split(fused, factor=n_ty)
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[output].pragma(kernel_scope, "unroll_explicit", False)
return s
| 34.252336
| 100
| 0.670941
|
aede9bb9c5474f53763675061161eba6451891a8
| 943
|
py
|
Python
|
0000_book/pickplace_xarm_grasp_planning.py
|
takuya-ki/wrs
|
f6e1009b94332504042fbde9b39323410394ecde
|
[
"MIT"
] | null | null | null |
0000_book/pickplace_xarm_grasp_planning.py
|
takuya-ki/wrs
|
f6e1009b94332504042fbde9b39323410394ecde
|
[
"MIT"
] | null | null | null |
0000_book/pickplace_xarm_grasp_planning.py
|
takuya-ki/wrs
|
f6e1009b94332504042fbde9b39323410394ecde
|
[
"MIT"
] | null | null | null |
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import grasping.planning.antipodal as gpa
import robot_sim.end_effectors.gripper.xarm_gripper.xarm_gripper as xag
base = wd.World(cam_pos=[1, 1, 1], lookat_pos=[0, 0, 0])
gm.gen_frame().attach_to(base)
object_box = cm.gen_box(extent=[.02, .06, .7])
object_box.set_rgba([.7, .5, .3, .7])
object_box.attach_to(base)
# hnd_s
gripper_s = xag.XArmGripper()
grasp_info_list = gpa.plan_grasps(gripper_s, object_box, openning_direction='loc_y', max_samples=7, min_dist_between_sampled_contact_points=.03)
gpa.write_pickle_file('box', grasp_info_list, './', 'xarm_long_box.pickle')
for grasp_info in grasp_info_list:
jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
gripper_s.fix_to(hnd_pos, hnd_rotmat)
gripper_s.jaw_to(jaw_width)
gripper_s.gen_meshmodel().attach_to(base)
base.run()
| 44.904762
| 144
| 0.78685
|
3223f2cea9f4b3be864aec7f2bddeaf4b2132332
| 2,024
|
py
|
Python
|
options/options.py
|
choyingw/CFCNet
|
828e0c09c646a4669685b3d31b8aa0ae2a5cd351
|
[
"MIT"
] | 33
|
2019-11-27T02:09:05.000Z
|
2022-03-08T11:41:28.000Z
|
options/options.py
|
Qiu731/CFCNet
|
7ad0928e52767175669f3c4eb9e88fcbdd2d6b01
|
[
"MIT"
] | 6
|
2019-12-18T08:59:01.000Z
|
2021-07-05T21:11:05.000Z
|
options/options.py
|
Qiu731/CFCNet
|
7ad0928e52767175669f3c4eb9e88fcbdd2d6b01
|
[
"MIT"
] | 6
|
2019-11-20T11:27:17.000Z
|
2022-03-04T10:23:44.000Z
|
from .base_options import BaseOptions
class AdvanceOptions(BaseOptions):
def initialize(self, parser, flag):
parser = BaseOptions.initialize(self, parser)
parser.add_argument('--print_freq', type=int, default=1, help='frequency of showing training results on console')
parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
parser.add_argument('--niter', type=int, default=400, help='# of iter at starting learning rate')
parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate for optimizer')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum factor for SGD')
        parser.add_argument('--weight_decay', type=float, default=0.0005, help='weight decay factor for optimizer')
parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine')
parser.add_argument('--lr_decay_iters', type=int, default=5000000, help='multiply by a gamma every lr_decay_iters iterations')
parser.add_argument('--lr_decay_epochs', type=int, default=100, help='multiply by a gamma every lr_decay_epoch epochs')
parser.add_argument('--lr_gamma', type=float, default=0.9, help='gamma factor for lr_scheduler')
parser.add_argument('--nP', type=int, default=500, help='number of points')
parser.add_argument('--train_path', help='path to the training dataset')
parser.add_argument('--test_path', help='path to the testing dataset')
self.isTrain = flag
return parser
| 80.96
| 173
| 0.708004
|
12b89f53c67c0d93a0d72c9ec1401de5895b7a5b
| 5,082
|
py
|
Python
|
satellite_constellation/GroundStation.py
|
samsipe/SatelliteConstellationCreator
|
6acc75d652e0d34fa27c1e5bca29beb71a18b0f2
|
[
"MIT"
] | 7
|
2020-08-24T08:19:54.000Z
|
2021-05-24T15:37:36.000Z
|
satellite_constellation/GroundStation.py
|
samsipe/SatelliteConstellationCreator
|
6acc75d652e0d34fa27c1e5bca29beb71a18b0f2
|
[
"MIT"
] | 1
|
2021-05-23T10:31:29.000Z
|
2021-05-23T10:34:02.000Z
|
satellite_constellation/GroundStation.py
|
samsipe/SatelliteConstellationCreator
|
6acc75d652e0d34fa27c1e5bca29beb71a18b0f2
|
[
"MIT"
] | 8
|
2019-12-18T04:06:25.000Z
|
2021-08-22T08:42:21.000Z
|
import warnings
class GroundStation(object):
def __init__(self, name, lat, long, elevation, beam_width):
self.__name = name
self.__lat = lat
self.__long = long
self.__elevation = elevation
self.__beam = beam_width
@property
def name(self):
return self.__name
@name.setter
def name(self, new_name):
self.__name = new_name
@property
def lat(self):
return self.__lat
@lat.setter
def lat(self, new_lat):
if (new_lat < -90) or (new_lat > 90):
return ValueError("Latitude must be between -90 and 90")
else:
self.__lat = new_lat
@property
def long(self):
return self.__long
@long.setter
def long(self, new_long):
if (new_long < -180) or (new_long > 180):
return ValueError("Longitude must be between -180 and 180")
else:
self.__long = new_long
@property
def elevation(self):
return self.__elevation
@elevation.setter
def elevation(self, new_elev):
if new_elev < 0:
return ValueError("Elevation must be above 0")
if new_elev > 8900:
return ValueError("Elevation must be on the ground")
else:
self.__elevation = new_elev
@property
def beam(self):
return self.__beam
@beam.setter
def beam(self, new_beam):
if (new_beam < 0) or (new_beam > 180):
return ValueError("Beam width must be between 0 and 180 degrees")
self.__beam = new_beam
def as_xml(self):
warnings.warn("XML support is depreciated and not supported from PIGI 0.8.5 onward", DeprecationWarning)
return '\t\t<Entity Type="GroundStation" Name="{0}">\n' \
'\t\t\t<PropertySection Name="UserProperties">\n' \
'\t\t\t\t<FloatPropertyValue name="Latitude" value="{1}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Longitude" value="{2}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Elevation" value="{3}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="BeamWidth" value="{4}"/>\n' \
'\t\t\t\t<StringPropertyValue name="PlanetName" value="Earth"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Animation">\n' \
'\t\t\t\t<ArrayPropertyValue name="Position" value="[6339.69, -699.193, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Orientation" value="[1, 0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Scale" value="[1, 1, 1]"/>\n' \
'\t\t\t\t<EnumPropertyValue name="Debug" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Time Input">\n' \
'\t\t\t\t<TimestampPropertyValue name="Timepoint" value="2016-May-07 08:32:21.059611"/>\n' \
'\t\t\t\t<DurationPropertyValue name="Duration" value="2"/>\n' \
'\t\t\t\t<DurationPropertyValue name="StartOffset" value="-1"/>\n' \
'\t\t\t\t<DurationPropertyValue name="Timestep" value="0.000694444"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Favourite">\n' \
'\t\t\t\t<EnumPropertyValue name="favourite" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Mesh">\n' \
'\t\t\t\t<ArrayPropertyValue name="Position" value="[0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Orientation" value="[1, 0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Scale" value="[500, 500, 500]"/>\n' \
'\t\t\t\t<StringPropertyValue name="name" value="GroundStation.mesh"/>\n' \
'\t\t\t\t<EnumPropertyValue name="Debug" value="0"/>\n' \
'\t\t\t\t<StringPropertyValue name="group" value="SolarSystem"/>\n' \
'\t\t\t\t<EnumPropertyValue name="visibility" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Look Angles">\n' \
'\t\t\t\t<ArrayPropertyValue name="LatLon" value="[0, 0]"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Elevation" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t</Entity>\n'.format(self.name, self.lat, self.long, self.elevation, self.beam)
def __repr__(self):
return "{0}, {1}, {2}, {3}, {4}".format(self.name, self.lat, self.long, self.elevation, self.beam)
def __str__(self):
return "Ground Station: {0}\n" \
"Latitude: {1}, Longitude: {2}" \
"Elevation: {3}, Beam Width: [4}".format(self.name, self.lat, self.long, self.elevation, self.beam)
def as_dict(self):
return {"Name": self.name,
"Latitude": self.lat,
"Longitude": self.long,
"Elevation": self.elevation,
"Beam Width": self.beam,
"Type": 'station'}
| 42.35
| 114
| 0.547816
|
ba08f3f74db2cba5f0ba8ce0da9cec12bbf51da9
| 2,509
|
py
|
Python
|
viewwork/forms.py
|
pikhovkin/django-viewwork
|
65597f599ecbd0def37b04eae77017fb8f5dfbe6
|
[
"MIT"
] | 4
|
2021-12-12T03:56:05.000Z
|
2022-01-10T08:54:31.000Z
|
viewwork/forms.py
|
pikhovkin/django-viewwork
|
65597f599ecbd0def37b04eae77017fb8f5dfbe6
|
[
"MIT"
] | null | null | null |
viewwork/forms.py
|
pikhovkin/django-viewwork
|
65597f599ecbd0def37b04eae77017fb8f5dfbe6
|
[
"MIT"
] | null | null | null |
from operator import attrgetter, itemgetter
import sys
from django import forms
from django.apps import apps
from django.conf import settings as dj_settings
from django.db.models import BLANK_CHOICE_DASH
from django.utils.translation import gettext_lazy as _, get_language
from modeltrans.utils import build_localized_fieldname
try:
from easy_select2 import Select2
except (ImportError, ModuleNotFoundError):
Select2 = forms.Select
from . import BaseViewWork, settings
from .models import Menu
__all__ = (
'MenuAdminForm',
)
class MenuAdminForm(forms.ModelForm):
view = forms.ChoiceField(label=_('View'), required=False, widget=Select2())
def __init__(self, *args, **kwargs):
super(MenuAdminForm, self).__init__(*args, **kwargs)
        view_choices = []
vw_prefix = attrgetter('vw_prefix')
item1 = itemgetter(1)
for app_label, opts in BaseViewWork.vw.items():
app = apps.get_app_config(app_label)
urls = sys.modules[f'{app.module.__name__}.urls']
namespace = getattr(urls, 'app_name', None) or app.module.__name__
namespace = f'{namespace}:' if settings.USE_APP_NAMESPACE else ''
views = filter(lambda v: not vw_prefix(v[1]) or not v[0].startswith(vw_prefix(v[1])), opts.items())
views = list(map(item1, views))
options = [(f'{namespace}{v.vw_name}', f'{v.vw_verbose_name}: {v.vw_name}') for v in views]
options.sort(key=item1)
view_choices.append((app.verbose_name, options))
view_choices.sort(key=lambda opt: opt[0])
view_choices.insert(0, BLANK_CHOICE_DASH[0])
self.fields['view'].choices = view_choices
self.fields['parent'].queryset = Menu.objects.filter(view='').order_by('name_i18n')
self.fields['name_i18n'].required = True
class Meta:
model = Menu
fields = '__all__'
exclude = ('name',)
widgets = {
'parent': Select2()
}
@staticmethod
def _strip(value):
return (value or '').strip()
def clean_name_i18n(self):
return self._strip(self.cleaned_data.get('name_i18n'))
def clean(self):
cleaned_data = super().clean()
for lang in {get_language(), dj_settings.LANGUAGE_CODE}:
field_name = build_localized_fieldname('name', lang)
cleaned_data[field_name] = self._strip(cleaned_data.get(field_name)) or cleaned_data.get('name_i18n', '')
return cleaned_data
| 34.369863
| 117
| 0.658828
|
7b07fb8f0c9d0d66ddfbb77d100a56d1ee4ba1b3
| 7,040
|
py
|
Python
|
chatbotv2/my_seq2seq.py
|
drpreetyrai/ChatBotCourse
|
352a6672a19d3692c884ff376cbe067fff37e210
|
[
"MIT"
] | 5,087
|
2016-06-28T08:58:30.000Z
|
2020-08-18T08:49:46.000Z
|
chatbotv2/my_seq2seq.py
|
tta1993/ChatBotCourse
|
156041d51ec51842592e8a1eeda565197fe31aec
|
[
"MIT"
] | 28
|
2016-10-26T08:01:18.000Z
|
2019-10-22T06:41:21.000Z
|
chatbotv2/my_seq2seq.py
|
tta1993/ChatBotCourse
|
156041d51ec51842592e8a1eeda565197fe31aec
|
[
"MIT"
] | 1,691
|
2016-06-29T07:33:36.000Z
|
2020-08-18T08:50:02.000Z
|
# -*- coding: utf-8 -*-
import sys
import math
import tflearn
import tensorflow as tf
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import rnn
import chardet
import numpy as np
import struct
seq = []
max_w = 50
float_size = 4
word_vector_dict = {}
word_vec_dim = 200
max_seq_len = 16
def load_vectors(input):
"""从vectors.bin加载词向量,返回一个word_vector_dict的词典,key是词,value是200维的向量
"""
print "begin load vectors"
input_file = open(input, "rb")
# Read the vocabulary size and the vector dimension
words_and_size = input_file.readline()
words_and_size = words_and_size.strip()
words = long(words_and_size.split(' ')[0])
size = long(words_and_size.split(' ')[1])
print "words =", words
print "size =", size
for b in range(0, words):
a = 0
word = ''
# Read one word (characters up to the first space)
while True:
c = input_file.read(1)
word = word + c
if c == '' or c == ' ':
break
if a < max_w and c != '\n':
a = a + 1
word = word.strip()
vector = []
for index in range(0, size):
m = input_file.read(float_size)
(weight,) = struct.unpack('f', m)
vector.append(float(weight))
# Store the word and its (truncated) vector in the dict
#word_vector_dict[word.decode('utf-8')] = vector
word_vector_dict[word.decode('utf-8')] = vector[0:word_vec_dim]
input_file.close()
print "load vectors finish"
def init_seq():
"""读取切好词的文本文件,加载全部词序列
"""
file_object = open('zhenhuanzhuan.segment', 'r')
vocab_dict = {}
while True:
line = file_object.readline()
if line:
for word in line.decode('utf-8').split(' '):
if word_vector_dict.has_key(word):
seq.append(word_vector_dict[word])
else:
break
file_object.close()
def vector_sqrtlen(vector):
len = 0
for item in vector:
len += item * item
len = math.sqrt(len)
return len
def vector_cosine(v1, v2):
if len(v1) != len(v2):
sys.exit(1)
sqrtlen1 = vector_sqrtlen(v1)
sqrtlen2 = vector_sqrtlen(v2)
value = 0
for item1, item2 in zip(v1, v2):
value += item1 * item2
return value / (sqrtlen1*sqrtlen2)
def vector2word(vector):
max_cos = -10000
match_word = ''
for word in word_vector_dict:
v = word_vector_dict[word]
cosine = vector_cosine(vector, v)
if cosine > max_cos:
max_cos = cosine
match_word = word
return (match_word, max_cos)
class MySeq2Seq(object):
"""
Approach: feed the input and output sequences together as a single input, then split them apart with slice and unpack.
The encoder-decoder structure follows the paper exactly.
The decoder outputs are flattened along the 200-dimensional word vectors, so the output shape is (?, seqlen*200),
which allows training with a plain regression loss; the target y is flattened the same way to stay consistent.
"""
def __init__(self, max_seq_len = 16, word_vec_dim = 200):
self.max_seq_len = max_seq_len
self.word_vec_dim = word_vec_dim
def generate_trainig_data(self):
load_vectors("./vectors.bin")
init_seq()
xy_data = []
y_data = []
for i in range(30,40,10):
# The question and the answer are each 16 tokens, so take 32 in total
start = i*self.max_seq_len*2
middle = i*self.max_seq_len*2 + self.max_seq_len
end = (i+1)*self.max_seq_len*2
sequence_xy = seq[start:end]
sequence_y = seq[middle:end]
print "right answer"
for w in sequence_y:
(match_word, max_cos) = vector2word(w)
print match_word
sequence_y = [np.ones(self.word_vec_dim)] + sequence_y
xy_data.append(sequence_xy)
y_data.append(sequence_y)
return np.array(xy_data), np.array(y_data)
def model(self, feed_previous=False):
# Build encoder_inputs and GO-prefixed decoder_inputs from the combined XY input
input_data = tflearn.input_data(shape=[None, self.max_seq_len*2, self.word_vec_dim], dtype=tf.float32, name = "XY")
encoder_inputs = tf.slice(input_data, [0, 0, 0], [-1, self.max_seq_len, self.word_vec_dim], name="enc_in")
decoder_inputs_tmp = tf.slice(input_data, [0, self.max_seq_len, 0], [-1, self.max_seq_len-1, self.word_vec_dim], name="dec_in_tmp")
go_inputs = tf.ones_like(decoder_inputs_tmp)
go_inputs = tf.slice(go_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
decoder_inputs = tf.concat(1, [go_inputs, decoder_inputs_tmp], name="dec_in")
# Encoder
# Feed encoder_inputs to the encoder; it returns an output (the first value of the predicted sequence) and a state (passed on to the decoder)
(encoder_output_tensor, states) = tflearn.lstm(encoder_inputs, self.word_vec_dim, return_state=True, scope='encoder_lstm')
encoder_output_sequence = tf.pack([encoder_output_tensor], axis=1)
# Decoder
# During prediction, the output of the previous time step is fed in as the input of the next time step
# Start by using the encoder's last output as the first decoder input
if feed_previous:
first_dec_input = go_inputs
else:
first_dec_input = tf.slice(decoder_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
decoder_output_tensor = tflearn.lstm(first_dec_input, self.word_vec_dim, initial_state=states, return_seq=False, reuse=False, scope='decoder_lstm')
decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
decoder_output_sequence_list = [decoder_output_tensor]
# Then feed the decoder output back in as the input of the next time step
for i in range(self.max_seq_len-1):
if feed_previous:
next_dec_input = decoder_output_sequence_single
else:
next_dec_input = tf.slice(decoder_inputs, [0, i+1, 0], [-1, 1, self.word_vec_dim])
decoder_output_tensor = tflearn.lstm(next_dec_input, self.word_vec_dim, return_seq=False, reuse=True, scope='decoder_lstm')
decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
decoder_output_sequence_list.append(decoder_output_tensor)
decoder_output_sequence = tf.pack(decoder_output_sequence_list, axis=1)
real_output_sequence = tf.concat(1, [encoder_output_sequence, decoder_output_sequence])
net = tflearn.regression(real_output_sequence, optimizer='sgd', learning_rate=0.1, loss='mean_square')
model = tflearn.DNN(net)
return model
def train(self):
trainXY, trainY = self.generate_trainig_data()
model = self.model(feed_previous=False)
model.fit(trainXY, trainY, n_epoch=1000, snapshot_epoch=False)
model.save('./model/model')
return model
def load(self):
model = self.model(feed_previous=True)
model.load('./model/model')
return model
if __name__ == '__main__':
phrase = sys.argv[1]
my_seq2seq = MySeq2Seq(word_vec_dim=word_vec_dim, max_seq_len=max_seq_len)
if phrase == 'train':
my_seq2seq.train()
else:
model = my_seq2seq.load()
trainXY, trainY = my_seq2seq.generate_trainig_data()
predict = model.predict(trainXY)
for sample in predict:
print "predict answer"
for w in sample[1:]:
(match_word, max_cos) = vector2word(w)
print match_word, max_cos
| 33.684211
| 155
| 0.62642
|
94b0e997afafff1613db1148e1b4d042248ffd80
| 1,477
|
py
|
Python
|
utils/code_generator/time_measurement/phase_2/postprocess_programs.py
|
zehor-l/tiramisu
|
225b30d3495bee52c0e06ed7794f402f2e58de73
|
[
"MIT"
] | 23
|
2017-05-03T13:06:34.000Z
|
2018-06-07T07:12:43.000Z
|
utils/code_generator/time_measurement/phase_2/postprocess_programs.py
|
zehor-l/tiramisu
|
225b30d3495bee52c0e06ed7794f402f2e58de73
|
[
"MIT"
] | 2
|
2017-04-25T08:59:09.000Z
|
2017-05-11T16:41:55.000Z
|
utils/code_generator/time_measurement/phase_2/postprocess_programs.py
|
zehor-l/tiramisu
|
225b30d3495bee52c0e06ed7794f402f2e58de73
|
[
"MIT"
] | 5
|
2017-02-16T14:26:40.000Z
|
2018-05-30T16:49:27.000Z
|
"""
Fuse the files generated by the execution jobs into a single file.
The final file will be a list, where each entry has the following format:
(function_id, schedule_id, list of execution times, the median, the speedup)
"""
import pickle
import numpy as np
from pathlib import Path
# Path to the files generated by the execution jobs
src_path = Path("results/parts/")
# Path to the file that will contain all the execution times
dst_path = "results/final_exec_times.pickle"
final_exec_times = []
# Fuse all execution times to a single list
for file_path in src_path.iterdir():
if file_path.name.startswith("final_exec_times"):
with open(file_path, "rb") as f:
final_exec_times.extend(pickle.load(f))
# Compute the medians
final_exec_times_median = []
for x in final_exec_times:
func_id, sched_id, e = x
final_exec_times_median.append((func_id, sched_id, e, np.median(e)))
# Compute the speedups
ref_progs = dict()
for x in final_exec_times_median:
func_id, sched_id, _, median = x
if sched_id.endswith("no_schedule"):
ref_progs[func_id] = median
final_exec_times_median_speedup = []
for x in final_exec_times_median:
func_id, sched_id, e, median = x
speedup = float(ref_progs[func_id] / median)
final_exec_times_median_speedup.append((func_id, sched_id, e, median, speedup))
# Save results
with open(dst_path, "wb") as f:
pickle.dump(final_exec_times_median_speedup, f)
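# Editorial sketch (not part of the original script): reading the fused file back and
# unpacking one (function_id, schedule_id, times, median, speedup) entry could look like
# the following; the variable names are hypothetical.
#
#     with open(dst_path, "rb") as f:
#         results = pickle.load(f)
#     func_id, sched_id, times, median, speedup = results[0]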
| 28.403846
| 83
| 0.723087
|
4556cee23ff776bf8806e5d9263552e62030f0a5
| 21,965
|
py
|
Python
|
venv/lib/python3.9/site-packages/google/cloud/tasks_v2/types/target.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 39
|
2020-04-17T09:54:54.000Z
|
2022-03-23T12:28:07.000Z
|
venv/lib/python3.9/site-packages/google/cloud/tasks_v2/types/target.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 80
|
2020-02-04T00:48:43.000Z
|
2022-03-31T22:59:41.000Z
|
venv/lib/python3.9/site-packages/google/cloud/tasks_v2/types/target.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 9
|
2020-02-06T01:45:11.000Z
|
2021-04-14T08:38:08.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.tasks.v2",
manifest={
"HttpMethod",
"HttpRequest",
"AppEngineHttpRequest",
"AppEngineRouting",
"OAuthToken",
"OidcToken",
},
)
class HttpMethod(proto.Enum):
r"""The HTTP method used to deliver the task."""
HTTP_METHOD_UNSPECIFIED = 0
POST = 1
GET = 2
HEAD = 3
PUT = 4
DELETE = 5
PATCH = 6
OPTIONS = 7
class HttpRequest(proto.Message):
r"""HTTP request.
The task will be pushed to the worker as an HTTP request. If the
worker or the redirected worker acknowledges the task by returning a
successful HTTP response code ([``200`` - ``299``]), the task will
be removed from the queue. If any other HTTP response code is
returned or no response is received, the task will be retried
according to the following:
- User-specified throttling: [retry
configuration][google.cloud.tasks.v2.Queue.retry_config], [rate
limits][google.cloud.tasks.v2.Queue.rate_limits], and the
[queue's state][google.cloud.tasks.v2.Queue.state].
- System throttling: To prevent the worker from overloading, Cloud
Tasks may temporarily reduce the queue's effective rate.
User-specified settings will not be changed.
System throttling happens because:
- Cloud Tasks backs off on all errors. Normally the backoff
specified in [rate
limits][google.cloud.tasks.v2.Queue.rate_limits] will be used.
But if the worker returns ``429`` (Too Many Requests), ``503``
(Service Unavailable), or the rate of errors is high, Cloud Tasks
will use a higher backoff rate. The retry specified in the
``Retry-After`` HTTP response header is considered.
- To prevent traffic spikes and to smooth sudden increases in
traffic, dispatches ramp up slowly when the queue is newly
created or idle and if large numbers of tasks suddenly become
available to dispatch (due to spikes in create task rates, the
queue being unpaused, or many tasks that are scheduled at the
same time).
Attributes:
url (str):
Required. The full url path that the request will be sent
to.
This string must begin with either "http://" or "https://".
Some examples are: ``http://acme.com`` and
``https://acme.com/sales:8080``. Cloud Tasks will encode
some characters for safety and compatibility. The maximum
allowed URL length is 2083 characters after encoding.
The ``Location`` header response from a redirect response
[``300`` - ``399``] may be followed. The redirect is not
counted as a separate attempt.
http_method (google.cloud.tasks_v2.types.HttpMethod):
The HTTP method to use for the request. The
default is POST.
headers (Sequence[google.cloud.tasks_v2.types.HttpRequest.HeadersEntry]):
HTTP request headers.
This map contains the header field names and values. Headers
can be set when the [task is
created][google.cloud.tasks.v2beta3.CloudTasks.CreateTask].
These headers represent a subset of the headers that will
accompany the task's HTTP request. Some HTTP request headers
will be ignored or replaced.
A partial list of headers that will be ignored or replaced
is:
- Host: This will be computed by Cloud Tasks and derived
from
[HttpRequest.url][google.cloud.tasks.v2.HttpRequest.url].
- Content-Length: This will be computed by Cloud Tasks.
- User-Agent: This will be set to ``"Google-Cloud-Tasks"``.
- X-Google-\*: Google use only.
- X-AppEngine-\*: Google use only.
``Content-Type`` won't be set by Cloud Tasks. You can
explicitly set ``Content-Type`` to a media type when the
[task is
created][google.cloud.tasks.v2beta3.CloudTasks.CreateTask].
For example, ``Content-Type`` can be set to
``"application/octet-stream"`` or ``"application/json"``.
Headers which can have multiple values (according to
RFC2616) can be specified using comma-separated values.
The size of the headers must be less than 80KB.
body (bytes):
HTTP request body.
A request body is allowed only if the [HTTP
method][google.cloud.tasks.v2.HttpRequest.http_method] is
POST, PUT, or PATCH. It is an error to set body on a task
with an incompatible
[HttpMethod][google.cloud.tasks.v2.HttpMethod].
oauth_token (google.cloud.tasks_v2.types.OAuthToken):
If specified, an `OAuth
token <https://developers.google.com/identity/protocols/OAuth2>`__
will be generated and attached as an ``Authorization``
header in the HTTP request.
This type of authorization should generally only be used
when calling Google APIs hosted on \*.googleapis.com.
oidc_token (google.cloud.tasks_v2.types.OidcToken):
If specified, an
`OIDC <https://developers.google.com/identity/protocols/OpenIDConnect>`__
token will be generated and attached as an ``Authorization``
header in the HTTP request.
This type of authorization can be used for many scenarios,
including calling Cloud Run, or endpoints where you intend
to validate the token yourself.
"""
url = proto.Field(proto.STRING, number=1,)
http_method = proto.Field(proto.ENUM, number=2, enum="HttpMethod",)
headers = proto.MapField(proto.STRING, proto.STRING, number=3,)
body = proto.Field(proto.BYTES, number=4,)
oauth_token = proto.Field(
proto.MESSAGE, number=5, oneof="authorization_header", message="OAuthToken",
)
oidc_token = proto.Field(
proto.MESSAGE, number=6, oneof="authorization_header", message="OidcToken",
)
class AppEngineHttpRequest(proto.Message):
r"""App Engine HTTP request.
The message defines the HTTP request that is sent to an App Engine
app when the task is dispatched.
Using
[AppEngineHttpRequest][google.cloud.tasks.v2.AppEngineHttpRequest]
requires
```appengine.applications.get`` <https://cloud.google.com/appengine/docs/admin-api/access-control>`__
Google IAM permission for the project and the following scope:
``https://www.googleapis.com/auth/cloud-platform``
The task will be delivered to the App Engine app which belongs to
the same project as the queue. For more information, see `How
Requests are
Routed <https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed>`__
and how routing is affected by `dispatch
files <https://cloud.google.com/appengine/docs/python/config/dispatchref>`__.
Traffic is encrypted during transport and never leaves Google
datacenters. Because this traffic is carried over a communication
mechanism internal to Google, you cannot explicitly set the protocol
(for example, HTTP or HTTPS). The request to the handler, however,
will appear to have used the HTTP protocol.
The [AppEngineRouting][google.cloud.tasks.v2.AppEngineRouting] used
to construct the URL that the task is delivered to can be set at the
queue-level or task-level:
- If [app_engine_routing_override is set on the
queue][Queue.app_engine_routing_override], this value is used for
all tasks in the queue, no matter what the setting is for the
[task-level
app_engine_routing][AppEngineHttpRequest.app_engine_routing].
The ``url`` that the task will be sent to is:
- ``url =`` [host][google.cloud.tasks.v2.AppEngineRouting.host]
``+``
[relative_uri][google.cloud.tasks.v2.AppEngineHttpRequest.relative_uri]
Tasks can be dispatched to secure app handlers, unsecure app
handlers, and URIs restricted with
```login: admin`` <https://cloud.google.com/appengine/docs/standard/python/config/appref>`__.
Because tasks are not run as any user, they cannot be dispatched to
URIs restricted with
```login: required`` <https://cloud.google.com/appengine/docs/standard/python/config/appref>`__
Task dispatches also do not follow redirects.
The task attempt has succeeded if the app's request handler returns
an HTTP response code in the range [``200`` - ``299``]. The task
attempt has failed if the app's handler returns a non-2xx response
code or Cloud Tasks does not receive response before the
[deadline][google.cloud.tasks.v2.Task.dispatch_deadline]. Failed
tasks will be retried according to the [retry
configuration][google.cloud.tasks.v2.Queue.retry_config]. ``503``
(Service Unavailable) is considered an App Engine system error
instead of an application error and will cause Cloud Tasks' traffic
congestion control to temporarily throttle the queue's dispatches.
Unlike other types of task targets, a ``429`` (Too Many Requests)
response from an app handler does not cause traffic congestion
control to throttle the queue.
Attributes:
http_method (google.cloud.tasks_v2.types.HttpMethod):
The HTTP method to use for the request. The default is POST.
The app's request handler for the task's target URL must be
able to handle HTTP requests with this http_method,
otherwise the task attempt will fail with error code 405
(Method Not Allowed). See `Writing a push task request
handler <https://cloud.google.com/appengine/docs/java/taskqueue/push/creating-handlers#writing_a_push_task_request_handler>`__
and the documentation for the request handlers in the
language your app is written in e.g. `Python Request
Handler <https://cloud.google.com/appengine/docs/python/tools/webapp/requesthandlerclass>`__.
app_engine_routing (google.cloud.tasks_v2.types.AppEngineRouting):
Task-level setting for App Engine routing.
- If [app_engine_routing_override is set on the
queue][Queue.app_engine_routing_override], this value is
used for all tasks in the queue, no matter what the
setting is for the [task-level
app_engine_routing][AppEngineHttpRequest.app_engine_routing].
relative_uri (str):
The relative URI.
The relative URI must begin with "/" and must be
a valid HTTP relative URI. It can contain a path
and query string arguments. If the relative URI
is empty, then the root path "/" will be used.
No spaces are allowed, and the maximum length
allowed is 2083 characters.
headers (Sequence[google.cloud.tasks_v2.types.AppEngineHttpRequest.HeadersEntry]):
HTTP request headers.
This map contains the header field names and values. Headers
can be set when the [task is
created][google.cloud.tasks.v2.CloudTasks.CreateTask].
Repeated headers are not supported but a header value can
contain commas.
Cloud Tasks sets some headers to default values:
- ``User-Agent``: By default, this header is
``"AppEngine-Google; (+http://code.google.com/appengine)"``.
This header can be modified, but Cloud Tasks will append
``"AppEngine-Google; (+http://code.google.com/appengine)"``
to the modified ``User-Agent``.
If the task has a
[body][google.cloud.tasks.v2.AppEngineHttpRequest.body],
Cloud Tasks sets the following headers:
- ``Content-Type``: By default, the ``Content-Type`` header
is set to ``"application/octet-stream"``. The default can
be overridden by explicitly setting ``Content-Type`` to a
particular media type when the [task is
created][google.cloud.tasks.v2.CloudTasks.CreateTask].
For example, ``Content-Type`` can be set to
``"application/json"``.
- ``Content-Length``: This is computed by Cloud Tasks. This
value is output only. It cannot be changed.
The headers below cannot be set or overridden:
- ``Host``
- ``X-Google-\*``
- ``X-AppEngine-\*``
In addition, Cloud Tasks sets some headers when the task is
dispatched, such as headers containing information about the
task; see `request
headers <https://cloud.google.com/tasks/docs/creating-appengine-handlers#reading_request_headers>`__.
These headers are set only when the task is dispatched, so
they are not visible when the task is returned in a Cloud
Tasks response.
Although there is no specific limit for the maximum number
of headers or the size, there is a limit on the maximum size
of the [Task][google.cloud.tasks.v2.Task]. For more
information, see the
[CreateTask][google.cloud.tasks.v2.CloudTasks.CreateTask]
documentation.
body (bytes):
HTTP request body.
A request body is allowed only if the HTTP method is POST or
PUT. It is an error to set a body on a task with an
incompatible [HttpMethod][google.cloud.tasks.v2.HttpMethod].
"""
http_method = proto.Field(proto.ENUM, number=1, enum="HttpMethod",)
app_engine_routing = proto.Field(
proto.MESSAGE, number=2, message="AppEngineRouting",
)
relative_uri = proto.Field(proto.STRING, number=3,)
headers = proto.MapField(proto.STRING, proto.STRING, number=4,)
body = proto.Field(proto.BYTES, number=5,)
class AppEngineRouting(proto.Message):
r"""App Engine Routing.
Defines routing characteristics specific to App Engine - service,
version, and instance.
For more information about services, versions, and instances see `An
Overview of App
Engine <https://cloud.google.com/appengine/docs/python/an-overview-of-app-engine>`__,
`Microservices Architecture on Google App
Engine <https://cloud.google.com/appengine/docs/python/microservices-on-app-engine>`__,
`App Engine Standard request
routing <https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed>`__,
and `App Engine Flex request
routing <https://cloud.google.com/appengine/docs/flexible/python/how-requests-are-routed>`__.
Using [AppEngineRouting][google.cloud.tasks.v2.AppEngineRouting]
requires
```appengine.applications.get`` <https://cloud.google.com/appengine/docs/admin-api/access-control>`__
Google IAM permission for the project and the following scope:
``https://www.googleapis.com/auth/cloud-platform``
Attributes:
service (str):
App service.
By default, the task is sent to the service which is the
default service when the task is attempted.
For some queues or tasks which were created using the App
Engine Task Queue API,
[host][google.cloud.tasks.v2.AppEngineRouting.host] is not
parsable into
[service][google.cloud.tasks.v2.AppEngineRouting.service],
[version][google.cloud.tasks.v2.AppEngineRouting.version],
and
[instance][google.cloud.tasks.v2.AppEngineRouting.instance].
For example, some tasks which were created using the App
Engine SDK use a custom domain name; custom domains are not
parsed by Cloud Tasks. If
[host][google.cloud.tasks.v2.AppEngineRouting.host] is not
parsable, then
[service][google.cloud.tasks.v2.AppEngineRouting.service],
[version][google.cloud.tasks.v2.AppEngineRouting.version],
and
[instance][google.cloud.tasks.v2.AppEngineRouting.instance]
are the empty string.
version (str):
App version.
By default, the task is sent to the version which is the
default version when the task is attempted.
For some queues or tasks which were created using the App
Engine Task Queue API,
[host][google.cloud.tasks.v2.AppEngineRouting.host] is not
parsable into
[service][google.cloud.tasks.v2.AppEngineRouting.service],
[version][google.cloud.tasks.v2.AppEngineRouting.version],
and
[instance][google.cloud.tasks.v2.AppEngineRouting.instance].
For example, some tasks which were created using the App
Engine SDK use a custom domain name; custom domains are not
parsed by Cloud Tasks. If
[host][google.cloud.tasks.v2.AppEngineRouting.host] is not
parsable, then
[service][google.cloud.tasks.v2.AppEngineRouting.service],
[version][google.cloud.tasks.v2.AppEngineRouting.version],
and
[instance][google.cloud.tasks.v2.AppEngineRouting.instance]
are the empty string.
instance (str):
App instance.
By default, the task is sent to an instance which is
available when the task is attempted.
Requests can only be sent to a specific instance if `manual
scaling is used in App Engine
Standard <https://cloud.google.com/appengine/docs/python/an-overview-of-app-engine?hl=en_US#scaling_types_and_instance_classes>`__.
App Engine Flex does not support instances. For more
information, see `App Engine Standard request
routing <https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed>`__
and `App Engine Flex request
routing <https://cloud.google.com/appengine/docs/flexible/python/how-requests-are-routed>`__.
host (str):
Output only. The host that the task is sent to.
The host is constructed from the domain name of the app
associated with the queue's project ID (for example
.appspot.com), and the
[service][google.cloud.tasks.v2.AppEngineRouting.service],
[version][google.cloud.tasks.v2.AppEngineRouting.version],
and
[instance][google.cloud.tasks.v2.AppEngineRouting.instance].
Tasks which were created using the App Engine SDK might have
a custom domain name.
For more information, see `How Requests are
Routed <https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed>`__.
"""
service = proto.Field(proto.STRING, number=1,)
version = proto.Field(proto.STRING, number=2,)
instance = proto.Field(proto.STRING, number=3,)
host = proto.Field(proto.STRING, number=4,)
class OAuthToken(proto.Message):
r"""Contains information needed for generating an `OAuth
token <https://developers.google.com/identity/protocols/OAuth2>`__.
This type of authorization should generally only be used when
calling Google APIs hosted on \*.googleapis.com.
Attributes:
service_account_email (str):
`Service account
email <https://cloud.google.com/iam/docs/service-accounts>`__
to be used for generating OAuth token. The service account
must be within the same project as the queue. The caller
must have iam.serviceAccounts.actAs permission for the
service account.
scope (str):
OAuth scope to be used for generating OAuth
access token. If not specified,
"https://www.googleapis.com/auth/cloud-platform"
will be used.
"""
service_account_email = proto.Field(proto.STRING, number=1,)
scope = proto.Field(proto.STRING, number=2,)
class OidcToken(proto.Message):
r"""Contains information needed for generating an `OpenID Connect
token <https://developers.google.com/identity/protocols/OpenIDConnect>`__.
This type of authorization can be used for many scenarios, including
calling Cloud Run, or endpoints where you intend to validate the
token yourself.
Attributes:
service_account_email (str):
`Service account
email <https://cloud.google.com/iam/docs/service-accounts>`__
to be used for generating OIDC token. The service account
must be within the same project as the queue. The caller
must have iam.serviceAccounts.actAs permission for the
service account.
audience (str):
Audience to be used when generating OIDC
token. If not specified, the URI specified in
target will be used.
"""
service_account_email = proto.Field(proto.STRING, number=1,)
audience = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
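# Editorial usage sketch (not part of the generated module): constructing an HttpRequest
# message with the fields documented above might look like the following; the URL and
# payload are hypothetical.
#
#     example_request = HttpRequest(
#         url="https://example.com/task-handler",
#         http_method=HttpMethod.POST,
#         headers={"Content-Type": "application/json"},
#         body=b'{"payload": 1}',
#     )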
| 44.9182
| 143
| 0.660187
|
751adf6af2a7e2de015bee1d3a74ee2af03688fb
| 411
|
py
|
Python
|
Singleton/singleton.py
|
mst-solar-car/kicad-bom-generator
|
2aae905056d06f3d25343a8d784049c141d05640
|
[
"MIT"
] | 3
|
2018-02-26T12:31:41.000Z
|
2020-10-10T14:14:11.000Z
|
Singleton/singleton.py
|
mst-solar-car/kicad-bom-generator
|
2aae905056d06f3d25343a8d784049c141d05640
|
[
"MIT"
] | null | null | null |
Singleton/singleton.py
|
mst-solar-car/kicad-bom-generator
|
2aae905056d06f3d25343a8d784049c141d05640
|
[
"MIT"
] | null | null | null |
class Singleton:
def __init__(self, decorated):
self._decorated = decorated
self._instance = None
def __call__(self, *args, **kwargs):
""" Return the singleton instance """
if self._instance is None:
self._instance = self._decorated(*args, **kwargs)
return self._instance
def Reset(self):
""" Clears a singleton object, only used in tests """
self._instance = None
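# Editorial usage sketch (not part of the original module): the class above is intended to
# be applied as a class decorator, so every call to the decorated name returns the same
# instance. The example class name is hypothetical.
#
#     @Singleton
#     class Config:
#         pass
#
#     assert Config() is Config()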
| 21.631579
| 57
| 0.6618
|
091b5e9b96508a816c9224f0d251dea6d7d36f80
| 15,457
|
py
|
Python
|
capture.py
|
RMichaelSwan/tele-capture
|
d8ebb6886a7b46c19f677e6a017051ca5f8a7eea
|
[
"MIT"
] | 2
|
2021-02-17T18:28:31.000Z
|
2021-02-18T13:40:15.000Z
|
capture.py
|
RMichaelSwan/tele-capture
|
d8ebb6886a7b46c19f677e6a017051ca5f8a7eea
|
[
"MIT"
] | 1
|
2021-02-18T13:39:54.000Z
|
2021-02-18T23:06:50.000Z
|
capture.py
|
RMichaelSwan/telecap
|
d8ebb6886a7b46c19f677e6a017051ca5f8a7eea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
A script to capture the speaker audio and screen
TODO: Get ROS audio stream
TODO: provide ROS video stream
"""
import numpy as np
import cv2
import time
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Wnck', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk, Gdk, Wnck, GdkPixbuf
def windows2png():
"""Capture all windows and save to file
"""
screen = Gdk.Screen.get_default()
for i, window in enumerate(screen.get_window_stack()):
pb = Gdk.pixbuf_get_from_window(window, *window.get_geometry())
pb.savev("{}.png".format(i), "png", (), ())
def array_from_pixbuf(p):
"""Convert from GdkPixbuf to numpy array"
Args:
p (GdkPixbuf): The GdkPixbuf provided from some window handle
Returns:
ndarray: The pixel array in (height, width, channels) order, RGB or RGBA
"""
w,h,c,r=(p.get_width(), p.get_height(), p.get_n_channels(), p.get_rowstride())
assert p.get_colorspace() == GdkPixbuf.Colorspace.RGB
assert p.get_bits_per_sample() == 8
if p.get_has_alpha():
assert c == 4
else:
assert c == 3
assert r >= w * c
a=np.frombuffer(p.get_pixels(),dtype=np.uint8)
if a.shape[0] == w*c*h:
return a.reshape( (h, w, c), order = 'C' )
else:
b=np.zeros((h,w*c),'uint8')
for j in range(h):
b[j,:]=a[r*j:r*j+w*c]
return b.reshape( (h, w, c) )
def array_to_cv2(pix_array, colorspace=cv2.COLOR_RGB2BGR):
return cv2.cvtColor(pix_array, colorspace)
class WindowCapture():
def __init__(self, window_title="Zoom Meeting", activation_delay = 0.1):
"""Captures from GUI window of specified title. Serves as a handle on
that window as long as it is still available. If the window is
covered up by another window after init finishes, the window can
still be captured from.
This captures from specific Gtk windows using Wnck, which means
it is likely only compatible on Linux gui environments.
Wnck API Reference: https://lazka.github.io/pgi-docs/Wnck-3.0/classes/Screen.html
Gdk API Reference: https://lazka.github.io/pgi-docs/Gdk-3.0/classes/Screen.html
Args:
window_title (str, optional): Name of the window to capture from.
Defaults to "Zoom Meeting".
activation_delay (float, optional): How long in seconds to wait for
a window to be activated so that the correct handle will be
acquired. If the wrong window is captured, increase this number.
"""
self.window, self.window_title, self.activation_delay = None, None, None
Gtk.init([]) # necessary since we're not using a Gtk.main() loop
self.set_window(window_title=window_title,activation_delay=activation_delay)
def _get_active_window_(self):
"""Gets a handle on the active Gdk window.
Returns:
Gdk.Window: The window handle.
"""
screen = Gdk.Screen.get_default()
return screen.get_active_window()
def _activate_window_(self):
"""This function uses Wnck to set a window as the active/focused window on
the user's screen. It is especially useful when combined with Gdk to
get active window pixels. Note: Gdk has no direct way of accessing a
window by title.
Returns:
bool: True if window found and activated, else False
"""
screen = Wnck.Screen.get_default()
screen.force_update() # recommended per Wnck documentation
found_win = False
for window in screen.get_windows():
name = window.get_name()
if (name == self.window_title):
found_win = True
window.activate(0)
time.sleep(self.activation_delay)
##should be the proper solution but Wnck/Pygi is kinda trash;
##the active boolean never returns true and proper activate times fail.
# window.activate(int(time.time()))
# while(not window.is_active()):
# time.sleep(self.activation_delay)
break
# clean up Wnck (saves resources, check documentation)
window = None
screen = None
Wnck.shutdown()
return found_win
def set_window(self, window_title, activation_delay = 0.1):
"""Set the window we are going to capture from
Args:
window_title (str): Title of the window to capture from.
activation_delay (float, optional): How long in seconds to wait for
a window to be activated so that the correct handle will be
acquired. Some computers may take longer to do this. Defaults to 0.1.
NOTE: This delay is a workaround for Wnck's inability to get window state.
Raises:
ValueError: If no window with the given title could be found and activated.
"""
self.window_title = window_title
self.activation_delay = activation_delay
if (self._activate_window_()):
self.window = self._get_active_window_()
else:
raise ValueError(f'Unable to get/find window with title "{self.window_title}" for capture.')
def test_fps(self, frames = 100):
"""Gives an estimate of the frames per second capture rate
Args:
frames (int, optional): Number of frames to acquire for test. Defaults to 100.
Returns:
float: The determined FPS for capture.
"""
i = 1
start = time.time()
while (i <= frames):
pb = Gdk.pixbuf_get_from_window(self.window, *self.window.get_geometry())
i += 1
fps = (frames / (time.time() - start))
print("--- achieved %s fps ---" % fps)
return fps
def get_cv_img(self):
"""Get a new frame from window in OpenCV format.
Returns:
cv::_OutputArray: Most recent window frame in cv output format.
"""
return cv2.cvtColor(self.get_numpy_img(), cv2.COLOR_RGB2BGR) # cv2 works in BGR colorspace
def get_numpy_img(self):
"""Get a new frame from window as a Numpy array in RGBA format
Returns:
ndarray: (Height, Width, Channels) shaped array
"""
pb = Gdk.pixbuf_get_from_window(self.window, *self.window.get_geometry())
return array_from_pixbuf(pb)
def save_window_png(self, filename = "0"):
"""Save current window frame to png
Args:
filename (str, optional): Filename to save as (without extension). Defaults to "0".
"""
pb = Gdk.pixbuf_get_from_window(self.window, *self.window.get_geometry())
pb.savev(f"{filename}.png", "png", (), ())
class PartcipantDetector():
def __init__(self, epsilon = 0.01, detect_rate = 1, display_box = False, debug = False,
gallery_color_rgba = np.array([26,26,26,255]), active_color_rgba = np.array([35,217,89,255]),
crop_percentX = 0.99, crop_percentY = 0.89, aspectRatio = 1.77):
"""Detects participants in a video call and provides frames/video from
that call. Current model assumes use of Zoom in gallery mode.
#TODO separate detection code from box detection code (make use of detect_rate)
#TODO optimize this for frequent function calls/video streaming.
Possible options for optimization include: scaling down image before
running box detection, only running detection when major image changes
occur.
Args:
epsilon (float, optional): How sensitive the box detector is.
A higher value results in boxes with imperfections being detected
more often. Defaults to 0.01.
detect_rate (int, optional): How often in Hz to check participant
window positions and active participant. Defaults to 1 Hz.
display_box (bool, optional): Whether to display detection boxes
around frames. Defaults to False.
debug (bool, optional): Activates a number of debug outputs
gallery_color_rgba (ndarray, optional): The RGBA color surrounding the participant window.
Defaults to np.array([26,26,26,255]) which is the Zoom default.
active_color_rgba (ndarray, optional): The primary color of the box
that highlights the active user.. Defaults to np.array([35,217,89,255]).
crop_percentX (float, optional): How much extra content to keep
from image window width-wise; lower percents remove more from
the right side. Defaults to 0.99.
crop_percentY (float, optional): How much extra content to keep
from image window height-wise; lower percents remove more from
the bottom side. Defaults to 0.89.
aspectRatio (float, optional): The predicted aspect ratio of
participant videos. Used to filter out false positives. Defaults to 1.77.
"""
self.epsilon = epsilon
self.detect_rate = detect_rate
self.display_box = display_box
self.debug = debug
self.gallery_color = gallery_color_rgba
self.active_color = active_color_rgba
self.crop_percentX = crop_percentX
self.crop_percentY = crop_percentY
self.aspectRatio = aspectRatio
def detect(self, image):
"""Do a detection on the provided image.
Args:
image (ndarray): The RGBA image to work off of.
The array is considered mutable while the internal array values
are expected to be readonly.
Returns:
list[ndarray]: A list of participant sub-images if any, else empty list.
int: Index of the active participant in previous list
"""
#crop out extra bars around window edges
image = image[0:int(image.shape[0]*self.crop_percentY),
0:int(image.shape[1]*self.crop_percentX),...]
if self.debug: print(f"Cropped image array shape: {image.shape}")
img = array_to_cv2(image,cv2.COLOR_RGB2BGR)
masked = image.copy()
gal_mask = (image == self.gallery_color).all(-1)
masked[gal_mask] = 255
masked[np.logical_not(gal_mask)] = 0
##We can find the active participant using the color...
# active_mask = (image == self.active_color).all(-1)
# masked[...,:4][active_mask] = [0,0,0,0]
#reference: https://stackoverflow.com/a/11427501/5715374
gray = array_to_cv2(masked,cv2.COLOR_RGB2GRAY)
if self.debug: cv2.imshow("Full", gray)
_,thresh = cv2.threshold(gray,127,255,1)
contours,_ = cv2.findContours(thresh,cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
rects = [] # a list of found rects and their area
#find the interesting rectangles
for cnt in contours:
approx = cv2.approxPolyDP(cnt,self.epsilon*cv2.arcLength(cnt,True),True)
if len(approx)==4: #rectangles have 4 sides
area = cv2.contourArea(cnt)
# print(cv2.contourArea(cnt))
if area < 100: #ignore tiny rectangles
continue
_,_,w,h = cv2.boundingRect(cnt)
ar = w/h
if (ar > 3*self.aspectRatio or ar < self.aspectRatio/3): #bad aspect ratio
if self.debug: print(f"rejected aspect ratio of {ar}")
continue
if self.debug: print(f"Aspect ratio = {ar}")
rects.append((cnt,area))
# Find the active participant--they should have the largest box area due to their window highlight
# contours_active = sorted(rects,key=cv2.contourArea,reverse=True) # an alternate method that always puts the active participant first by ignoring original order
# active_participant_index = sorted(inds,key=areas, reverse=True)[0]
participants = []
active_participant_index = self._get_active_rect_(rects)
for i in range(len(rects)):
x,y,w,h = cv2.boundingRect(rects[i][0])
participants.append(img[y:y+h, x:x+w]) #crop participant image to bounding box.
if self.display_box:
if i == active_participant_index:
color = (0,255,0)
else:
color = (0,0,255)
cv2.drawContours(img,[rects[i][0]],0,color,2)
if self.debug: print(f"Detected {len(rects)} participants.")
return participants, active_participant_index
def _get_active_rect_(self, contours):
"""Find the active contour in a list. The current algorithm makes use of
the largest window area, which works on the assumption that the active
participant is highlighted and thus has a larger detected window size.
Args:
contours (list[(contour,int)]): A list containing tuples of contours with their associated area
Returns:
int: Index of the active contour in the list of contours
"""
greatestArea = 0
activeInd = -1
for i, cont in enumerate(contours):
if abs(cont[1]) > greatestArea:
greatestArea = cont[1]
activeInd = i
return activeInd
def test_pipeline_fps(wc, pd, frames = 100):
"""Gives an estimate of the frames per second capture rate
for the image capture and box detection pipeline
Args:
wc (WindowCapture): A handle on the current window capture
pd (PartcipantDetector): A handle on the participant detector
frames (int, optional): Number of frames to acquire for test. Defaults to 100.
Returns:
float: The determined FPS for capture.
"""
i = 1
start = time.time()
while (i <= frames):
pd.detect(wc.get_numpy_img())
i += 1
fps = (frames / (time.time() - start))
print("--- achieved %s fps ---" % fps)
return fps
def test():
x = WindowCapture(window_title="Zoom Meeting", activation_delay=0.1)
# x.test_fps()
detector = PartcipantDetector(debug=True, display_box=False)
# test_pipeline_fps(x,detector)
while(True):
participants, _ = detector.detect(x.get_numpy_img())
for i, part in enumerate(participants):
cv2.imshow(f"Participant {i}",part)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# cv2.waitKey(0)
cv2.destroyAllWindows()
x = None
#TODO: publish separate audio, video, and info streams with same topic order, with arrays of participants for each
def main():
x = WindowCapture(window_title="Zoom Meeting", activation_delay=0.1)
detector = PartcipantDetector(debug=False, display_box=True)
while(True):
participants, activeInd = detector.detect(x.get_numpy_img())
for i, part in enumerate(participants):
cv2.imshow(f"Participant {i}",part)
#TODO insert ROS code and publish here
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
detector = None
x = None
if __name__ == "__main__":
main()
| 40.891534
| 169
| 0.612991
|
88a4a400c21837c7e7a6b59d1b1dcd3fc7be1648
| 23,177
|
py
|
Python
|
lib/tests/streamlit/Server_test.py
|
gzzo/streamlit
|
c587e0e36a829cbd2133adeb5f20c1874f32880b
|
[
"Apache-2.0"
] | null | null | null |
lib/tests/streamlit/Server_test.py
|
gzzo/streamlit
|
c587e0e36a829cbd2133adeb5f20c1874f32880b
|
[
"Apache-2.0"
] | null | null | null |
lib/tests/streamlit/Server_test.py
|
gzzo/streamlit
|
c587e0e36a829cbd2133adeb5f20c1874f32880b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Server.py unit tests"""
import unittest
import mock
import pytest
import tornado.testing
import tornado.web
import tornado.websocket
import errno
from mock import MagicMock
from mock import patch
from tornado import gen
import streamlit.server.Server
from streamlit import config
from streamlit.ReportSession import ReportSession
from streamlit.UploadedFileManager import UploadedFile
from streamlit.server.Server import MAX_PORT_SEARCH_RETRIES
from streamlit.ForwardMsgCache import ForwardMsgCache
from streamlit.ForwardMsgCache import populate_hash_if_needed
from streamlit.elements import data_frame_proto
from streamlit.proto.BlockPath_pb2 import BlockPath
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.server.Server import State
from streamlit.server.Server import start_listening
from streamlit.server.Server import RetriesExceeded
from streamlit.server.routes import DebugHandler
from streamlit.server.routes import HealthHandler
from streamlit.server.routes import MessageCacheHandler
from streamlit.server.routes import MetricsHandler
from streamlit.server.server_util import is_cacheable_msg
from streamlit.server.server_util import is_url_from_allowed_origins
from streamlit.server.server_util import serialize_forward_msg
from tests.ServerTestCase import ServerTestCase
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)
def _create_dataframe_msg(df, id=1):
msg = ForwardMsg()
msg.metadata.delta_id = id
msg.metadata.parent_block.container = BlockPath.SIDEBAR
data_frame_proto.marshall_data_frame(df, msg.delta.new_element.data_frame)
return msg
def _create_report_finished_msg(status):
msg = ForwardMsg()
msg.report_finished = status
return msg
class ServerTest(ServerTestCase):
_next_report_id = 0
@tornado.testing.gen_test
def test_start_stop(self):
"""Test that we can start and stop the server."""
with self._patch_report_session():
yield self.start_server_loop()
self.assertEqual(State.WAITING_FOR_FIRST_BROWSER, self.server._state)
yield self.ws_connect()
self.assertEqual(State.ONE_OR_MORE_BROWSERS_CONNECTED, self.server._state)
self.server.stop()
self.assertEqual(State.STOPPING, self.server._state)
yield gen.sleep(0.1)
self.assertEqual(State.STOPPED, self.server._state)
@tornado.testing.gen_test
def test_websocket_connect(self):
"""Test that we can connect to the server via websocket."""
with self._patch_report_session():
yield self.start_server_loop()
self.assertFalse(self.server.browser_is_connected)
# Open a websocket connection
ws_client = yield self.ws_connect()
self.assertTrue(self.server.browser_is_connected)
# Get this client's SessionInfo object
self.assertEqual(1, len(self.server._session_info_by_id))
session_info = list(self.server._session_info_by_id.values())[0]
# Close the connection
ws_client.close()
yield gen.sleep(0.1)
self.assertFalse(self.server.browser_is_connected)
# Ensure ReportSession.shutdown() was called, and that our
# SessionInfo was cleared.
session_info.session.shutdown.assert_called_once()
self.assertEqual(0, len(self.server._session_info_by_id))
@tornado.testing.gen_test
def test_multiple_connections(self):
"""Test multiple websockets can connect simultaneously."""
with self._patch_report_session():
yield self.start_server_loop()
self.assertFalse(self.server.browser_is_connected)
# Open a websocket connection
ws_client1 = yield self.ws_connect()
self.assertTrue(self.server.browser_is_connected)
# Open another
ws_client2 = yield self.ws_connect()
self.assertTrue(self.server.browser_is_connected)
# Assert that our session_infos are sane
session_infos = list(self.server._session_info_by_id.values())
self.assertEqual(2, len(session_infos))
self.assertNotEqual(
session_infos[0].session.id, session_infos[1].session.id,
)
# Close the first
ws_client1.close()
yield gen.sleep(0.1)
self.assertTrue(self.server.browser_is_connected)
# Close the second
ws_client2.close()
yield gen.sleep(0.1)
self.assertFalse(self.server.browser_is_connected)
@tornado.testing.gen_test
def test_websocket_compression(self):
with self._patch_report_session():
yield self.start_server_loop()
# Connect to the server, and explicitly request compression.
ws_client = yield tornado.websocket.websocket_connect(
self.get_ws_url("/stream"), compression_options={}
)
# Ensure that the "permessage-deflate" extension is returned
# from the server.
extensions = ws_client.headers.get("Sec-Websocket-Extensions")
self.assertIn("permessage-deflate", extensions)
@tornado.testing.gen_test
def test_websocket_compression_disabled(self):
with self._patch_report_session():
config._set_option("server.enableWebsocketCompression", False, "test")
yield self.start_server_loop()
# Connect to the server, and explicitly request compression.
ws_client = yield tornado.websocket.websocket_connect(
self.get_ws_url("/stream"), compression_options={}
)
# Ensure that the "Sec-Websocket-Extensions" header is not
# present in the response from the server.
self.assertIsNone(ws_client.headers.get("Sec-Websocket-Extensions"))
@tornado.testing.gen_test
def test_forwardmsg_hashing(self):
"""Test that outgoing ForwardMsgs contain hashes."""
with self._patch_report_session():
yield self.start_server_loop()
ws_client = yield self.ws_connect()
# Get the server's socket and session for this client
session_info = list(self.server._session_info_by_id.values())[0]
# Create a message and ensure its hash is unset; we're testing
# that _send_message adds the hash before it goes out.
msg = _create_dataframe_msg([1, 2, 3])
msg.ClearField("hash")
self.server._send_message(session_info, msg)
received = yield self.read_forward_msg(ws_client)
self.assertEqual(populate_hash_if_needed(msg), received.hash)
@tornado.testing.gen_test
def test_forwardmsg_cacheable_flag(self):
"""Test that the metadata.cacheable flag is set properly on outgoing
ForwardMsgs."""
with self._patch_report_session():
yield self.start_server_loop()
ws_client = yield self.ws_connect()
# Get the server's socket and session for this client
session_info = list(self.server._session_info_by_id.values())[0]
config._set_option("global.minCachedMessageSize", 0, "test")
cacheable_msg = _create_dataframe_msg([1, 2, 3])
self.server._send_message(session_info, cacheable_msg)
received = yield self.read_forward_msg(ws_client)
self.assertTrue(cacheable_msg.metadata.cacheable)
self.assertTrue(received.metadata.cacheable)
config._set_option("global.minCachedMessageSize", 1000, "test")
cacheable_msg = _create_dataframe_msg([4, 5, 6])
self.server._send_message(session_info, cacheable_msg)
received = yield self.read_forward_msg(ws_client)
self.assertFalse(cacheable_msg.metadata.cacheable)
self.assertFalse(received.metadata.cacheable)
@tornado.testing.gen_test
def test_duplicate_forwardmsg_caching(self):
"""Test that duplicate ForwardMsgs are sent only once."""
with self._patch_report_session():
config._set_option("global.minCachedMessageSize", 0, "test")
yield self.start_server_loop()
ws_client = yield self.ws_connect()
# Get the server's socket and session for this client
session_info = list(self.server._session_info_by_id.values())[0]
msg1 = _create_dataframe_msg([1, 2, 3], 1)
# Send the message, and read it back. It will not have been cached.
self.server._send_message(session_info, msg1)
uncached = yield self.read_forward_msg(ws_client)
self.assertEqual("delta", uncached.WhichOneof("type"))
msg2 = _create_dataframe_msg([1, 2, 3], 123)
# Send an equivalent message. This time, it should be cached,
# and a "hash_reference" message should be received instead.
self.server._send_message(session_info, msg2)
cached = yield self.read_forward_msg(ws_client)
self.assertEqual("ref_hash", cached.WhichOneof("type"))
# We should have the *hash* of msg1 and msg2:
self.assertEqual(msg1.hash, cached.ref_hash)
self.assertEqual(msg2.hash, cached.ref_hash)
# And the same *metadata* as msg2:
self.assertEqual(msg2.metadata, cached.metadata)
@tornado.testing.gen_test
def test_cache_clearing(self):
"""Test that report_run_count is incremented when a report
finishes running.
"""
with self._patch_report_session():
config._set_option("global.minCachedMessageSize", 0, "test")
config._set_option("global.maxCachedMessageAge", 1, "test")
yield self.start_server_loop()
yield self.ws_connect()
session = list(self.server._session_info_by_id.values())[0]
data_msg = _create_dataframe_msg([1, 2, 3])
def finish_report(success):
status = (
ForwardMsg.FINISHED_SUCCESSFULLY
if success
else ForwardMsg.FINISHED_WITH_COMPILE_ERROR
)
finish_msg = _create_report_finished_msg(status)
self.server._send_message(session, finish_msg)
def is_data_msg_cached():
return self.server._message_cache.get_message(data_msg.hash) is not None
def send_data_msg():
self.server._send_message(session, data_msg)
# Send a cacheable message. It should be cached.
send_data_msg()
self.assertTrue(is_data_msg_cached())
# End the report with a compile error. Nothing should change;
# compile errors don't increase the age of items in the cache.
finish_report(False)
self.assertTrue(is_data_msg_cached())
# End the report successfully. Nothing should change, because
# the age of the cached message is now 1.
finish_report(True)
self.assertTrue(is_data_msg_cached())
# Send the message again. This should reset its age to 0 in the
# cache, so it won't be evicted when the report next finishes.
send_data_msg()
self.assertTrue(is_data_msg_cached())
# Finish the report. The cached message age is now 1.
finish_report(True)
self.assertTrue(is_data_msg_cached())
# Finish again. The cached message age will be 2, and so it
# should be evicted from the cache.
finish_report(True)
self.assertFalse(is_data_msg_cached())
@tornado.testing.gen_test
def test_uploaded_file_triggers_rerun(self):
"""Uploading a file should trigger a re-run in the associated
ReportSession."""
with self._patch_report_session():
yield self.start_server_loop()
# Connect twice and get associated ReportSessions
yield self.ws_connect()
yield self.ws_connect()
session_info1 = list(self.server._session_info_by_id.values())[0]
session_info2 = list(self.server._session_info_by_id.values())[1]
file = UploadedFile("file.txt", b"123")
# "Upload a file" for Session1
self.server._uploaded_file_mgr.add_files(
session_id=session_info1.session.id,
widget_id="widget_id",
files=[file],
)
self.assertEqual(
self.server._uploaded_file_mgr.get_files(
session_info1.session.id, "widget_id"
),
[file],
)
# Session1 should have a rerun request; Session2 should not
session_info1.session.request_rerun.assert_called_once()
session_info2.session.request_rerun.assert_not_called()
@tornado.testing.gen_test
def test_orphaned_upload_file_deletion(self):
"""An uploaded file with no associated ReportSession should be
deleted."""
with self._patch_report_session():
yield self.start_server_loop()
yield self.ws_connect()
# "Upload a file" for a session that doesn't exist
self.server._uploaded_file_mgr.add_files(
session_id="no_such_session",
widget_id="widget_id",
files=[UploadedFile("file.txt", b"123")],
)
self.assertIsNone(
self.server._uploaded_file_mgr.get_files("no_such_session", "widget_id")
)
@staticmethod
def _create_mock_report_session(*args, **kwargs):
"""Create a mock ReportSession. Each mocked instance will have
its own unique ID."""
mock_id = mock.PropertyMock(
return_value="mock_id:%s" % ServerTest._next_report_id
)
ServerTest._next_report_id += 1
mock_session = mock.MagicMock(ReportSession, autospec=True, *args, **kwargs)
type(mock_session).id = mock_id
return mock_session
def _patch_report_session(self):
"""Mock the Server's ReportSession import. We don't want
actual sessions to be instantiated, or scripts to be run.
"""
return mock.patch(
"streamlit.server.Server.ReportSession",
# new_callable must return a function, not an object, or else
# there will only be a single ReportSession mock. Hence the lambda.
new_callable=lambda: self._create_mock_report_session,
)
class ServerUtilsTest(unittest.TestCase):
def test_is_url_from_allowed_origins_allowed_domains(self):
self.assertTrue(is_url_from_allowed_origins("localhost"))
self.assertTrue(is_url_from_allowed_origins("127.0.0.1"))
def test_is_url_from_allowed_origins_CORS_off(self):
with patch(
"streamlit.server.server_util.config.get_option", side_effect=[False]
):
self.assertTrue(is_url_from_allowed_origins("does not matter"))
def test_is_url_from_allowed_origins_s3_bucket(self):
with patch(
"streamlit.server.server_util.config.get_option",
side_effect=[True, "mybucket"],
):
self.assertTrue(is_url_from_allowed_origins("mybucket"))
def test_is_url_from_allowed_origins_browser_serverAddress(self):
with patch(
"streamlit.server.server_util.config.is_manually_set", side_effect=[True]
), patch(
"streamlit.server.server_util.config.get_option",
side_effect=[True, "browser.server.address"],
):
self.assertTrue(is_url_from_allowed_origins("browser.server.address"))
def test_is_url_from_allowed_origins_s3_url(self):
with patch(
"streamlit.server.server_util.config.is_manually_set", side_effect=[True]
), patch(
"streamlit.server.server_util.config.get_option",
side_effect=[True, "s3.amazon.com"],
):
self.assertTrue(is_url_from_allowed_origins("s3.amazon.com"))
def test_should_cache_msg(self):
"""Test server_util.should_cache_msg"""
config._set_option("global.minCachedMessageSize", 0, "test")
self.assertTrue(is_cacheable_msg(_create_dataframe_msg([1, 2, 3])))
config._set_option("global.minCachedMessageSize", 1000, "test")
self.assertFalse(is_cacheable_msg(_create_dataframe_msg([1, 2, 3])))
def test_should_limit_msg_size(self):
# Set up a 60MB ForwardMsg string
large_msg = _create_dataframe_msg([1, 2, 3])
large_msg.delta.new_element.markdown.body = "X" * 60 * 1000 * 1000
# Create a copy, since serialize_forward_msg modifies the original proto
large_msg_copy = ForwardMsg()
large_msg_copy.CopyFrom(large_msg)
deserialized_msg = ForwardMsg()
deserialized_msg.ParseFromString(serialize_forward_msg(large_msg_copy))
# The metadata should be the same, but contents should be replaced
self.assertEqual(deserialized_msg.metadata, large_msg.metadata)
self.assertNotEqual(deserialized_msg, large_msg)
expected = "Data of size 60.0MB exceeds write limit of 50.0MB"
self.assertEqual(deserialized_msg.delta.new_element.exception.message, expected)
class HealthHandlerTest(tornado.testing.AsyncHTTPTestCase):
"""Tests the /healthz endpoint"""
def setUp(self):
super(HealthHandlerTest, self).setUp()
self._is_healthy = True
def is_healthy(self):
return self._is_healthy
def get_app(self):
return tornado.web.Application(
[(r"/healthz", HealthHandler, dict(callback=self.is_healthy))]
)
def test_healthz(self):
response = self.fetch("/healthz")
self.assertEqual(200, response.code)
self.assertEqual(b"ok", response.body)
self._is_healthy = False
response = self.fetch("/healthz")
self.assertEqual(503, response.code)
def test_healthz_without_csrf(self):
config._set_option("server.enableXsrfProtection", False, "test")
response = self.fetch("/healthz")
self.assertEqual(200, response.code)
self.assertEqual(b"ok", response.body)
self.assertNotIn("Set-Cookie", response.headers)
def test_healthz_with_csrf(self):
config._set_option("server.enableXsrfProtection", True, "test")
response = self.fetch("/healthz")
self.assertEqual(200, response.code)
self.assertEqual(b"ok", response.body)
self.assertIn("Set-Cookie", response.headers)
class PortRotateAHundredTest(unittest.TestCase):
"""Tests port rotation handles a MAX_PORT_SEARCH_RETRIES attempts then sys exits"""
def get_httpserver(self):
httpserver = mock.MagicMock()
httpserver.listen = mock.Mock()
httpserver.listen.side_effect = OSError(errno.EADDRINUSE, "test", "asd")
return httpserver
def test_rotates_a_hundred_ports(self):
app = mock.MagicMock()
RetriesExceeded = streamlit.server.Server.RetriesExceeded
with pytest.raises(RetriesExceeded) as pytest_wrapped_e:
with patch.object(
tornado.httpserver, "HTTPServer", return_value=self.get_httpserver()
) as mock_server:
start_listening(app)
self.assertEqual(pytest_wrapped_e.type, SystemExit)
self.assertEqual(pytest_wrapped_e.value.code, errno.EADDRINUSE)
self.assertEqual(mock_server.listen.call_count, MAX_PORT_SEARCH_RETRIES)
class PortRotateOneTest(unittest.TestCase):
"""Tests port rotates one port"""
which_port = mock.Mock()
def get_httpserver(self):
httpserver = mock.MagicMock()
httpserver.listen = mock.Mock()
httpserver.listen.side_effect = OSError(errno.EADDRINUSE, "test", "asd")
return httpserver
@mock.patch("streamlit.server.Server.config._set_option")
@mock.patch("streamlit.server.Server.server_port_is_manually_set")
def test_rotates_one_port(
self, patched_server_port_is_manually_set, patched__set_option
):
app = mock.MagicMock()
patched_server_port_is_manually_set.return_value = False
with pytest.raises(RetriesExceeded) as pytest_wrapped_e:
with patch.object(
tornado.httpserver, "HTTPServer", return_value=self.get_httpserver()
) as mock_server:
start_listening(app)
PortRotateOneTest.which_port.assert_called_with(8502)
patched__set_option.assert_called_with(
"server.port", 8501, config.ConfigOption.STREAMLIT_DEFINITION
)
class MetricsHandlerTest(tornado.testing.AsyncHTTPTestCase):
"""Tests the /metrics endpoint"""
def get_app(self):
return tornado.web.Application([(r"/metrics", MetricsHandler)])
def test_metrics(self):
config.set_option("global.metrics", False)
response = self.fetch("/metrics")
self.assertEqual(404, response.code)
config.set_option("global.metrics", True)
response = self.fetch("/metrics")
self.assertEqual(200, response.code)
class DebugHandlerTest(tornado.testing.AsyncHTTPTestCase):
"""Tests the /debugz endpoint"""
def get_app(self):
return tornado.web.Application([(r"/debugz", DebugHandler)])
def test_debug(self):
# TODO - debugz is currently broken
pass
class MessageCacheHandlerTest(tornado.testing.AsyncHTTPTestCase):
def get_app(self):
self._cache = ForwardMsgCache()
return tornado.web.Application(
[(r"/message", MessageCacheHandler, dict(cache=self._cache))]
)
def test_message_cache(self):
# Create a new ForwardMsg and cache it
msg = _create_dataframe_msg([1, 2, 3])
msg_hash = populate_hash_if_needed(msg)
self._cache.add_message(msg, MagicMock(), 0)
# Cache hit
response = self.fetch("/message?hash=%s" % msg_hash)
self.assertEqual(200, response.code)
self.assertEqual(serialize_forward_msg(msg), response.body)
# Cache misses
self.assertEqual(404, self.fetch("/message").code)
self.assertEqual(404, self.fetch("/message?id=non_existent").code)
| 38.5
| 88
| 0.664236
|
35b0b6a047eb2b59de4172fc24362e39d65074d9
| 15,535
|
py
|
Python
|
lib/rucio/api/rule.py
|
fno2010/rucio
|
47e93cfbe5887071c70de4ba815c1bbdddfac2ce
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/api/rule.py
|
fno2010/rucio
|
47e93cfbe5887071c70de4ba815c1bbdddfac2ce
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/api/rule.py
|
fno2010/rucio
|
47e93cfbe5887071c70de4ba815c1bbdddfac2ce
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright CERN since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rucio.api.permission import has_permission
from rucio.common.config import config_get_bool
from rucio.common.exception import AccessDenied
from rucio.common.schema import validate_schema
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import api_update_return_dict
from rucio.core import rule
from rucio.db.sqla.session import read_session, stream_session, transactional_session
@read_session
def is_multi_vo(session=None):
"""
Check whether this instance is configured for multi-VO
    :returns: True if running in multi-VO
"""
return config_get_bool('common', 'multi_vo', raise_exception=False, default=False, session=session)
@transactional_session
def add_replication_rule(dids, copies, rse_expression, weight, lifetime, grouping, account, locked, subscription_id, source_replica_expression,
activity, notify, purge_replicas, ignore_availability, comment, ask_approval, asynchronous, delay_injection, priority,
split_container, meta, issuer, vo='def', session=None):
"""
Adds a replication rule.
:param dids: The data identifier set.
:param copies: The number of replicas.
:param rse_expression: Boolean string expression to give the list of RSEs.
:param weight: If the weighting option of the replication rule is used, the choice of RSEs takes their weight into account.
:param lifetime: The lifetime of the replication rules (in seconds).
:param grouping: ALL - All files will be replicated to the same RSE.
DATASET - All files in the same dataset will be replicated to the same RSE.
NONE - Files will be completely spread over all allowed RSEs without any grouping considerations at all.
:param account: The account owning the rule.
:param locked: If the rule is locked, it cannot be deleted.
:param subscription_id: The subscription_id, if the rule is created by a subscription.
:param source_replica_expression: Only use replicas from this RSE as sources.
:param activity: Activity to be passed on to the conveyor.
:param notify: Notification setting of the rule.
    :param purge_replicas: The purge setting to delete replicas immediately after rule deletion.
:param ignore_availability: Option to ignore the availability of RSEs.
:param comment: Comment about the rule.
:param ask_approval: Ask for approval of this rule.
:param asynchronous: Create rule asynchronously by judge-injector.
:param priority: Priority of the transfers.
:param split_container: Should a container rule be split into individual dataset rules.
:param meta: WFMS metadata as a dictionary.
:param issuer: The issuing account of this operation.
:param vo: The VO to act on.
:param session: The database session in use.
:returns: List of created replication rules.
"""
if account is None:
account = issuer
if activity is None:
activity = 'User Subscriptions'
kwargs = {'dids': dids, 'copies': copies, 'rse_expression': rse_expression, 'weight': weight, 'lifetime': lifetime,
'grouping': grouping, 'account': account, 'locked': locked, 'subscription_id': subscription_id,
'source_replica_expression': source_replica_expression, 'notify': notify, 'activity': activity,
'purge_replicas': purge_replicas, 'ignore_availability': ignore_availability, 'comment': comment,
'ask_approval': ask_approval, 'asynchronous': asynchronous, 'delay_injection': delay_injection, 'priority': priority,
'split_container': split_container, 'meta': meta}
validate_schema(name='rule', obj=kwargs, vo=vo)
if not has_permission(issuer=issuer, vo=vo, action='add_rule', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not add replication rule' % (issuer))
account = InternalAccount(account, vo=vo)
for d in dids:
d['scope'] = InternalScope(d['scope'], vo=vo)
return rule.add_rule(account=account,
dids=dids,
copies=copies,
rse_expression=rse_expression,
grouping=grouping,
weight=weight,
lifetime=lifetime,
locked=locked,
subscription_id=subscription_id,
source_replica_expression=source_replica_expression,
activity=activity,
notify=notify,
purge_replicas=purge_replicas,
ignore_availability=ignore_availability,
comment=comment,
ask_approval=ask_approval,
asynchronous=asynchronous,
delay_injection=delay_injection,
priority=priority,
split_container=split_container,
meta=meta,
session=session)
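# Illustrative only, not part of the Rucio source: a hedged sketch of how a
# caller might invoke add_replication_rule. The DID, RSE expression, account
# and option values below are hypothetical placeholders.
#
#   add_replication_rule(
#       dids=[{'scope': 'user.jdoe', 'name': 'dataset.example'}],
#       copies=2, rse_expression='tier=1&disk=1', weight=None, lifetime=86400,
#       grouping='DATASET', account='jdoe', locked=False, subscription_id=None,
#       source_replica_expression=None, activity=None, notify=None,
#       purge_replicas=False, ignore_availability=False, comment=None,
#       ask_approval=False, asynchronous=False, delay_injection=False,
#       priority=3, split_container=False, meta=None, issuer='jdoe')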
@read_session
def get_replication_rule(rule_id, issuer, vo='def', session=None):
"""
    Get a replication rule by its id.
:param rule_id: The rule_id to get.
:param issuer: The issuing account of this operation.
:param vo: The VO of the issuer.
:param session: The database session in use.
"""
kwargs = {'rule_id': rule_id}
if is_multi_vo(session=session) and not has_permission(issuer=issuer, vo=vo, action='access_rule_vo', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not access rules at other VOs.' % (issuer))
result = rule.get_rule(rule_id, session=session)
return api_update_return_dict(result, session=session)
@stream_session
def list_replication_rules(filters={}, vo='def', session=None):
"""
Lists replication rules based on a filter.
:param filters: dictionary of attributes by which the results should be filtered.
:param vo: The VO to act on.
:param session: The database session in use.
"""
# If filters is empty, create a new dict to avoid overwriting the function's default
if not filters:
filters = {}
if 'scope' in filters:
scope = filters['scope']
else:
scope = '*'
filters['scope'] = InternalScope(scope=scope, vo=vo)
if 'account' in filters:
account = filters['account']
else:
account = '*'
filters['account'] = InternalAccount(account=account, vo=vo)
rules = rule.list_rules(filters, session=session)
for r in rules:
yield api_update_return_dict(r, session=session)
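# Illustrative only (hypothetical values): callers typically pass filters such
# as {'scope': 'user.jdoe', 'account': 'jdoe'}; any key omitted here falls
# back to the '*' wildcard handled above.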
@read_session
def list_replication_rule_history(rule_id, issuer, vo='def', session=None):
"""
    Lists replication rule history.
:param rule_id: The rule_id to list.
:param issuer: The issuing account of this operation.
:param vo: The VO of the issuer.
:param session: The database session in use.
"""
kwargs = {'rule_id': rule_id}
if is_multi_vo(session=session) and not has_permission(issuer=issuer, vo=vo, action='access_rule_vo', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not access rules at other VOs.' % (issuer))
return rule.list_rule_history(rule_id, session=session)
@stream_session
def list_replication_rule_full_history(scope, name, vo='def', session=None):
"""
List the rule history of a DID.
:param scope: The scope of the DID.
:param name: The name of the DID.
:param vo: The VO to act on.
:param session: The database session in use.
"""
scope = InternalScope(scope, vo=vo)
rules = rule.list_rule_full_history(scope, name, session=session)
for r in rules:
yield api_update_return_dict(r, session=session)
@stream_session
def list_associated_replication_rules_for_file(scope, name, vo='def', session=None):
"""
Lists associated replication rules by file.
    :param scope: Scope of the file.
:param name: Name of the file.
:param vo: The VO to act on.
:param session: The database session in use.
"""
scope = InternalScope(scope, vo=vo)
rules = rule.list_associated_rules_for_file(scope=scope, name=name, session=session)
for r in rules:
yield api_update_return_dict(r, session=session)
@transactional_session
def delete_replication_rule(rule_id, purge_replicas, issuer, vo='def', session=None):
"""
Deletes a replication rule and all associated locks.
:param rule_id: The id of the rule to be deleted
:param purge_replicas: Purge the replicas immediately
:param issuer: The issuing account of this operation
:param vo: The VO to act on.
:param session: The database session in use.
:raises: RuleNotFound, AccessDenied
"""
kwargs = {'rule_id': rule_id, 'purge_replicas': purge_replicas}
if is_multi_vo(session=session) and not has_permission(issuer=issuer, vo=vo, action='access_rule_vo', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not access rules at other VOs.' % (issuer))
if not has_permission(issuer=issuer, vo=vo, action='del_rule', kwargs=kwargs):
raise AccessDenied('Account %s can not remove this replication rule.' % (issuer))
rule.delete_rule(rule_id=rule_id, purge_replicas=purge_replicas, soft=True, session=session)
@transactional_session
def update_replication_rule(rule_id, options, issuer, vo='def', session=None):
"""
    Update a replication rule (e.g. its lock state, approval status, or other options).
:param rule_id: The rule_id to lock.
:param options: Options dictionary.
:param issuer: The issuing account of this operation
:param vo: The VO to act on.
:param session: The database session in use.
:raises: RuleNotFound if no Rule can be found.
"""
kwargs = {'rule_id': rule_id, 'options': options}
if is_multi_vo(session=session) and not has_permission(issuer=issuer, vo=vo, action='access_rule_vo', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not access rules at other VOs.' % (issuer))
if 'approve' in options:
if not has_permission(issuer=issuer, vo=vo, action='approve_rule', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not approve/deny this replication rule.' % (issuer))
issuer = InternalAccount(issuer, vo=vo)
if options['approve']:
rule.approve_rule(rule_id=rule_id, approver=issuer, session=session)
else:
rule.deny_rule(rule_id=rule_id, approver=issuer, reason=options.get('comment', None), session=session)
else:
if not has_permission(issuer=issuer, vo=vo, action='update_rule', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not update this replication rule.' % (issuer))
if 'account' in options:
options['account'] = InternalAccount(options['account'], vo=vo)
rule.update_rule(rule_id=rule_id, options=options, session=session)
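# Illustrative only: hedged examples of `options` dictionaries this hook
# accepts (all values hypothetical).
#   {'approve': True}                         -> approve a pending rule
#   {'approve': False, 'comment': 'bad RSE'}  -> deny it with a reason
#   {'lifetime': 3600, 'account': 'jdoe'}     -> regular update via rule.update_rule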
@transactional_session
def reduce_replication_rule(rule_id, copies, exclude_expression, issuer, vo='def', session=None):
"""
Reduce the number of copies for a rule by atomically replacing the rule.
:param rule_id: Rule to be reduced.
:param copies: Number of copies of the new rule.
:param exclude_expression: RSE Expression of RSEs to exclude.
:param issuer: The issuing account of this operation
:param vo: The VO to act on.
:param session: The database session in use.
:raises: RuleReplaceFailed, RuleNotFound
"""
kwargs = {'rule_id': rule_id, 'copies': copies, 'exclude_expression': exclude_expression}
if is_multi_vo(session=session) and not has_permission(issuer=issuer, vo=vo, action='access_rule_vo', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not access rules at other VOs.' % (issuer))
if not has_permission(issuer=issuer, vo=vo, action='reduce_rule', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not reduce this replication rule.' % (issuer))
return rule.reduce_rule(rule_id=rule_id, copies=copies, exclude_expression=exclude_expression, session=session)
@read_session
def examine_replication_rule(rule_id, issuer, vo='def', session=None):
"""
Examine a replication rule.
:param rule_id: The rule_id to get.
:param issuer: The issuing account of this operation.
:param vo: The VO of the issuer.
:param session: The database session in use.
"""
kwargs = {'rule_id': rule_id}
if is_multi_vo(session=session) and not has_permission(issuer=issuer, vo=vo, action='access_rule_vo', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not access rules at other VOs.' % (issuer))
result = rule.examine_rule(rule_id, session=session)
result = api_update_return_dict(result, session=session)
if 'transfers' in result:
result['transfers'] = [api_update_return_dict(t, session=session) for t in result['transfers']]
return result
@transactional_session
def move_replication_rule(rule_id, rse_expression, override, issuer, vo='def', session=None):
"""
Move a replication rule to another RSE and, once done, delete the original one.
:param rule_id: Rule to be moved.
:param rse_expression: RSE expression of the new rule.
    :param override: Configurations to update for the new rule.
    :param issuer: The issuing account of this operation.
    :param vo: The VO to act on.
    :param session: The database session in use.
:raises: RuleNotFound, RuleReplaceFailed, InvalidRSEExpression, AccessDenied
"""
kwargs = {
'rule_id': rule_id,
'rse_expression': rse_expression,
'override': override,
}
if is_multi_vo(session=session) and not has_permission(issuer=issuer, vo=vo, action='access_rule_vo', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not access rules at other VOs.' % (issuer))
if not has_permission(issuer=issuer, vo=vo, action='move_rule', kwargs=kwargs, session=session):
raise AccessDenied('Account %s can not move this replication rule.' % (issuer))
return rule.move_rule(**kwargs, session=session)
| 47.362805
| 147
| 0.659994
|
e863fa1d724e3582c7975dd274fb07e42b4f72bb
| 15,980
|
py
|
Python
|
InvenTree/company/views.py
|
ArakniD/InvenTree
|
0ebf2ebd832b2d736e895abe054ca56bfd1cc477
|
[
"MIT"
] | null | null | null |
InvenTree/company/views.py
|
ArakniD/InvenTree
|
0ebf2ebd832b2d736e895abe054ca56bfd1cc477
|
[
"MIT"
] | null | null | null |
InvenTree/company/views.py
|
ArakniD/InvenTree
|
0ebf2ebd832b2d736e895abe054ca56bfd1cc477
|
[
"MIT"
] | null | null | null |
"""
Django views for interacting with Company app
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.views.generic import DetailView, ListView, UpdateView
from django.urls import reverse
from django.forms import HiddenInput
from moneyed import CURRENCIES
from InvenTree.views import AjaxCreateView, AjaxUpdateView, AjaxDeleteView
from InvenTree.helpers import str2bool
from InvenTree.views import InvenTreeRoleMixin
from .models import Company
from .models import SupplierPart
from .models import SupplierPriceBreak
from part.models import Part
from .forms import EditCompanyForm
from .forms import CompanyImageForm
from .forms import EditSupplierPartForm
from .forms import EditPriceBreakForm
import common.models
import common.settings
class CompanyIndex(InvenTreeRoleMixin, ListView):
""" View for displaying list of companies
"""
model = Company
template_name = 'company/index.html'
context_object_name = 'companies'
paginate_by = 50
permission_required = 'company.view_company'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
# Provide custom context data to the template,
# based on the URL we use to access this page
lookup = {
reverse('supplier-index'): {
'title': _('Suppliers'),
'button_text': _('New Supplier'),
'filters': {'is_supplier': 'true'},
'create_url': reverse('supplier-create'),
'pagetype': 'suppliers',
},
reverse('manufacturer-index'): {
'title': _('Manufacturers'),
'button_text': _('New Manufacturer'),
'filters': {'is_manufacturer': 'true'},
'create_url': reverse('manufacturer-create'),
'pagetype': 'manufacturers',
},
reverse('customer-index'): {
'title': _('Customers'),
'button_text': _('New Customer'),
'filters': {'is_customer': 'true'},
'create_url': reverse('customer-create'),
'pagetype': 'customers',
}
}
default = {
'title': _('Companies'),
'button_text': _('New Company'),
'filters': {},
'create_url': reverse('company-create'),
'pagetype': 'companies'
}
context = None
for item in lookup:
if self.request.path == item:
context = lookup[item]
break
if context is None:
context = default
for key, value in context.items():
ctx[key] = value
return ctx
def get_queryset(self):
""" Retrieve the Company queryset based on HTTP request parameters.
- supplier: Filter by supplier
- customer: Filter by customer
"""
queryset = Company.objects.all().order_by('name')
if self.request.GET.get('supplier', None):
queryset = queryset.filter(is_supplier=True)
if self.request.GET.get('customer', None):
queryset = queryset.filter(is_customer=True)
return queryset
class CompanyNotes(UpdateView):
""" View for editing the 'notes' field of a Company object.
"""
context_object_name = 'company'
template_name = 'company/notes.html'
model = Company
fields = ['notes']
permission_required = 'company.view_company'
def get_success_url(self):
return reverse('company-notes', kwargs={'pk': self.get_object().id})
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['editing'] = str2bool(self.request.GET.get('edit', ''))
return ctx
class CompanyDetail(DetailView):
""" Detail view for Company object """
    context_object_name = 'company'
template_name = 'company/detail.html'
queryset = Company.objects.all()
model = Company
permission_required = 'company.view_company'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
return ctx
class CompanyImage(AjaxUpdateView):
""" View for uploading an image for the Company """
model = Company
ajax_template_name = 'modal_form.html'
ajax_form_title = _('Update Company Image')
form_class = CompanyImageForm
permission_required = 'company.change_company'
def get_data(self):
return {
'success': _('Updated company image'),
}
class CompanyEdit(AjaxUpdateView):
""" View for editing a Company object """
model = Company
form_class = EditCompanyForm
context_object_name = 'company'
ajax_template_name = 'modal_form.html'
ajax_form_title = _('Edit Company')
permission_required = 'company.change_company'
def get_data(self):
return {
'info': _('Edited company information'),
}
class CompanyCreate(AjaxCreateView):
""" View for creating a new Company object """
model = Company
context_object_name = 'company'
form_class = EditCompanyForm
ajax_template_name = 'modal_form.html'
permission_required = 'company.add_company'
def get_form_title(self):
url = self.request.path
if url == reverse('supplier-create'):
return _("Create new Supplier")
if url == reverse('manufacturer-create'):
return _('Create new Manufacturer')
if url == reverse('customer-create'):
return _('Create new Customer')
return _('Create new Company')
def get_initial(self):
""" Initial values for the form data """
initials = super().get_initial().copy()
url = self.request.path
if url == reverse('supplier-create'):
initials['is_supplier'] = True
initials['is_customer'] = False
initials['is_manufacturer'] = False
elif url == reverse('manufacturer-create'):
initials['is_manufacturer'] = True
initials['is_supplier'] = True
initials['is_customer'] = False
elif url == reverse('customer-create'):
initials['is_customer'] = True
initials['is_manufacturer'] = False
initials['is_supplier'] = False
return initials
def get_data(self):
return {
'success': _("Created new company"),
}
class CompanyDelete(AjaxDeleteView):
""" View for deleting a Company object """
model = Company
success_url = '/company/'
ajax_template_name = 'company/delete.html'
ajax_form_title = _('Delete Company')
context_object_name = 'company'
permission_required = 'company.delete_company'
def get_data(self):
return {
'danger': _('Company was deleted'),
}
class SupplierPartDetail(DetailView):
""" Detail view for SupplierPart """
model = SupplierPart
template_name = 'company/supplier_part_detail.html'
context_object_name = 'part'
queryset = SupplierPart.objects.all()
permission_required = 'purchase_order.view'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
return ctx
class SupplierPartEdit(AjaxUpdateView):
""" Update view for editing SupplierPart """
model = SupplierPart
context_object_name = 'part'
form_class = EditSupplierPartForm
ajax_template_name = 'modal_form.html'
ajax_form_title = _('Edit Supplier Part')
role_required = 'purchase_order.change'
def get_form(self):
form = super().get_form()
supplier_part = self.get_object()
        # Hiding a MoneyField appears to break validation,
        # so set its value before hiding the widget
if form.is_valid():
form.cleaned_data['single_pricing'] = supplier_part.unit_pricing
# Hide the single-pricing field (only for creating a new SupplierPart!)
form.fields['single_pricing'].widget = HiddenInput()
return form
class SupplierPartCreate(AjaxCreateView):
""" Create view for making new SupplierPart """
model = SupplierPart
form_class = EditSupplierPartForm
ajax_template_name = 'company/supplier_part_create.html'
ajax_form_title = _('Create new Supplier Part')
context_object_name = 'part'
role_required = 'purchase_order.add'
def validate(self, part, form):
single_pricing = form.cleaned_data.get('single_pricing', None)
if single_pricing:
# TODO - What validation steps can be performed on the single_pricing field?
pass
def get_context_data(self):
"""
Supply context data to the form
"""
ctx = super().get_context_data()
# Add 'part' object
form = self.get_form()
part = form['part'].value()
try:
part = Part.objects.get(pk=part)
except (ValueError, Part.DoesNotExist):
part = None
ctx['part'] = part
return ctx
def save(self, form):
"""
If single_pricing is defined, add a price break for quantity=1
"""
# Save the supplier part object
supplier_part = super().save(form)
single_pricing = form.cleaned_data.get('single_pricing', None)
if single_pricing:
supplier_part.add_price_break(1, single_pricing)
return supplier_part
def get_form(self):
""" Create Form instance to create a new SupplierPart object.
Hide some fields if they are not appropriate in context
"""
form = super(AjaxCreateView, self).get_form()
if form.initial.get('part', None):
# Hide the part field
form.fields['part'].widget = HiddenInput()
return form
def get_initial(self):
""" Provide initial data for new SupplierPart:
- If 'supplier_id' provided, pre-fill supplier field
- If 'part_id' provided, pre-fill part field
"""
initials = super(SupplierPartCreate, self).get_initial().copy()
manufacturer_id = self.get_param('manufacturer')
supplier_id = self.get_param('supplier')
part_id = self.get_param('part')
supplier = None
if supplier_id:
try:
supplier = Company.objects.get(pk=supplier_id)
initials['supplier'] = supplier
except (ValueError, Company.DoesNotExist):
supplier = None
if manufacturer_id:
try:
initials['manufacturer'] = Company.objects.get(pk=manufacturer_id)
except (ValueError, Company.DoesNotExist):
pass
if part_id:
try:
initials['part'] = Part.objects.get(pk=part_id)
except (ValueError, Part.DoesNotExist):
pass
# Initial value for single pricing
if supplier:
currency_code = supplier.currency_code
else:
currency_code = common.settings.currency_code_default()
currency = CURRENCIES.get(currency_code, None)
if currency_code:
initials['single_pricing'] = ('', currency)
return initials
class SupplierPartDelete(AjaxDeleteView):
""" Delete view for removing a SupplierPart.
SupplierParts can be deleted using a variety of 'selectors'.
- ?part=<pk> -> Delete a single SupplierPart object
- ?parts=[] -> Delete a list of SupplierPart objects
"""
success_url = '/supplier/'
ajax_template_name = 'company/partdelete.html'
ajax_form_title = _('Delete Supplier Part')
role_required = 'purchase_order.delete'
parts = []
def get_context_data(self):
ctx = {}
ctx['parts'] = self.parts
return ctx
def get_parts(self):
""" Determine which SupplierPart object(s) the user wishes to delete.
"""
self.parts = []
# User passes a single SupplierPart ID
if 'part' in self.request.GET:
try:
self.parts.append(SupplierPart.objects.get(pk=self.request.GET.get('part')))
except (ValueError, SupplierPart.DoesNotExist):
pass
elif 'parts[]' in self.request.GET:
part_id_list = self.request.GET.getlist('parts[]')
self.parts = SupplierPart.objects.filter(id__in=part_id_list)
def get(self, request, *args, **kwargs):
self.request = request
self.get_parts()
return self.renderJsonResponse(request, form=self.get_form())
def post(self, request, *args, **kwargs):
""" Handle the POST action for deleting supplier parts.
"""
self.request = request
self.parts = []
for item in self.request.POST:
if item.startswith('supplier-part-'):
pk = item.replace('supplier-part-', '')
try:
self.parts.append(SupplierPart.objects.get(pk=pk))
except (ValueError, SupplierPart.DoesNotExist):
pass
confirm = str2bool(self.request.POST.get('confirm_delete', False))
data = {
'form_valid': confirm,
}
if confirm:
for part in self.parts:
part.delete()
return self.renderJsonResponse(self.request, data=data, form=self.get_form())
class PriceBreakCreate(AjaxCreateView):
""" View for creating a supplier price break """
model = SupplierPriceBreak
form_class = EditPriceBreakForm
ajax_form_title = _('Add Price Break')
ajax_template_name = 'modal_form.html'
role_required = 'purchase_order.add'
def get_data(self):
return {
'success': _('Added new price break')
}
def get_part(self):
"""
Attempt to extract SupplierPart object from the supplied data.
"""
try:
supplier_part = SupplierPart.objects.get(pk=self.request.GET.get('part'))
return supplier_part
except (ValueError, SupplierPart.DoesNotExist):
pass
try:
supplier_part = SupplierPart.objects.get(pk=self.request.POST.get('part'))
return supplier_part
except (ValueError, SupplierPart.DoesNotExist):
pass
return None
def get_form(self):
form = super(AjaxCreateView, self).get_form()
form.fields['part'].widget = HiddenInput()
return form
def get_initial(self):
initials = super(AjaxCreateView, self).get_initial()
supplier_part = self.get_part()
initials['part'] = self.get_part()
if supplier_part is not None:
currency_code = supplier_part.supplier.currency_code
else:
currency_code = common.settings.currency_code_default()
# Extract the currency object associated with the code
currency = CURRENCIES.get(currency_code, None)
if currency:
initials['price'] = [1.0, currency]
return initials
class PriceBreakEdit(AjaxUpdateView):
""" View for editing a supplier price break """
model = SupplierPriceBreak
form_class = EditPriceBreakForm
ajax_form_title = _('Edit Price Break')
ajax_template_name = 'modal_form.html'
role_required = 'purchase_order.change'
def get_form(self):
form = super(AjaxUpdateView, self).get_form()
form.fields['part'].widget = HiddenInput()
return form
class PriceBreakDelete(AjaxDeleteView):
""" View for deleting a supplier price break """
model = SupplierPriceBreak
ajax_form_title = _("Delete Price Break")
ajax_template_name = 'modal_delete_form.html'
role_required = 'purchase_order.delete'
| 28.183422
| 92
| 0.614831
|
4a2fa8666de20f1bff4e9df5492e8303a9a8752c
| 1,397
|
py
|
Python
|
app/twitter-authorize.py
|
maxnovais/twepy
|
665782420be7a3574f81f69902685af49e9e8ddd
|
[
"Apache-2.0"
] | null | null | null |
app/twitter-authorize.py
|
maxnovais/twepy
|
665782420be7a3574f81f69902685af49e9e8ddd
|
[
"Apache-2.0"
] | null | null | null |
app/twitter-authorize.py
|
maxnovais/twepy
|
665782420be7a3574f81f69902685af49e9e8ddd
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from config import TWITTER_API
"""
twitter-authorize:
- step through the process of creating and authorizing a
Twitter application.
Based on: https://github.com/ideoforms/python-twitter-examples
"""
import twitter
import time
print("1. Create a new Twitter application here: https://apps.twitter.com")
print("When you have created the application, enter:")
print(" your application name: "),
app_name = input()
if TWITTER_API['consumer_key']:
consumer_key = TWITTER_API['consumer_key']
else:
print(" your consumer key: "),
consumer_key = input()
if TWITTER_API['consumer_secret']:
consumer_secret = TWITTER_API['consumer_secret']
else:
print(" your consumer secret: "),
consumer_secret = input()
print("2. Now, authorize this application.")
print("You'll be forwarded to a web browser in two seconds.")
print()
time.sleep(2)
try:
access_key, access_secret = twitter.oauth_dance(app_name, consumer_key, consumer_secret)
except Exception as e:
print("Error: {}".format(e))
print("Done.")
print("Now, replace the credentials in config.py with the below:")
print()
print("TWITTER_API = {")
print(" 'consumer_key': '{}',".format(consumer_key))
print(" 'consumer_secret': '{}',".format(consumer_secret))
print(" 'access_key': '{}',".format(access_key))
print(" 'access_secret': '{}',".format(access_secret))
print("}")
| 25.87037
| 92
| 0.704366
|
49532eae9033453f9324228392736216faf76860
| 4,550
|
py
|
Python
|
plugins/backbones/threatbus_rabbitmq/threatbus_rabbitmq/plugin.py
|
GTrunSec/threatbus
|
030993a0d10adf25929b85ef0a19bbdc657210f6
|
[
"BSD-3-Clause"
] | 212
|
2020-01-25T12:05:54.000Z
|
2022-03-22T05:59:35.000Z
|
plugins/backbones/threatbus_rabbitmq/threatbus_rabbitmq/plugin.py
|
GTrunSec/threatbus
|
030993a0d10adf25929b85ef0a19bbdc657210f6
|
[
"BSD-3-Clause"
] | 57
|
2020-01-28T14:23:32.000Z
|
2022-03-10T13:18:11.000Z
|
plugins/backbones/threatbus_rabbitmq/threatbus_rabbitmq/plugin.py
|
GTrunSec/threatbus
|
030993a0d10adf25929b85ef0a19bbdc657210f6
|
[
"BSD-3-Clause"
] | 11
|
2020-02-01T15:15:15.000Z
|
2022-01-20T18:37:22.000Z
|
from collections import defaultdict
from dynaconf import Validator
from dynaconf.utils.boxing import DynaBox
from multiprocessing import JoinableQueue
import pika
from retry import retry
from socket import gethostname
from stix2 import Indicator, Sighting
import threading
import threatbus
from threatbus.data import SnapshotRequest, SnapshotEnvelope
from threatbus_rabbitmq import RabbitMQConsumer, RabbitMQPublisher
from typing import Dict, List, Union
"""RabbitMQ backbone plugin for Threat Bus"""
plugin_name = "rabbitmq"
subscriptions: Dict[str, set] = defaultdict(set)
lock = threading.Lock()
workers: List[threatbus.StoppableWorker] = list()
def provision(
topic: str, msg: Union[Indicator, Sighting, SnapshotEnvelope, SnapshotRequest]
):
"""
Provisions the given `msg` to all subscribers of `topic`.
@param topic The topic string to use for provisioning
@param msg The message to provision
"""
global subscriptions, lock, logger
lock.acquire()
for t in filter(lambda t: str(topic).startswith(str(t)), subscriptions.keys()):
for outq in subscriptions[t]:
outq.put(msg)
lock.release()
logger.debug(f"Relayed message from RabbitMQ: {msg}")
@threatbus.backbone
def config_validators() -> List[Validator]:
return [
Validator(
f"plugins.backbones.{plugin_name}.host",
f"plugins.backbones.{plugin_name}.username",
f"plugins.backbones.{plugin_name}.password",
is_type_of=str,
required=True,
),
Validator(
f"plugins.backbones.{plugin_name}.vhost",
is_type_of=str,
default="/",
),
Validator(
f"plugins.backbones.{plugin_name}.exchange_name",
default="threatbus",
),
Validator(
f"plugins.backbones.{plugin_name}.port",
is_type_of=int,
required=True,
),
Validator(
f"plugins.backbones.{plugin_name}.queue.durable",
f"plugins.backbones.{plugin_name}.queue.lazy",
is_type_of=bool,
default=True,
),
Validator(
f"plugins.backbones.{plugin_name}.queue.auto_delete",
f"plugins.backbones.{plugin_name}.queue.exclusive",
is_type_of=bool,
default=False,
),
Validator(
f"plugins.backbones.{plugin_name}.queue.name_join_symbol",
required=True,
default=".",
),
Validator(
f"plugins.backbones.{plugin_name}.queue.name_suffix",
default=gethostname(),
),
Validator(
f"plugins.backbones.{plugin_name}.queue.max_items",
is_type_of=int,
default=0,
),
]
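# Illustrative only: a hypothetical configuration fragment (YAML-style) that
# would satisfy the validators above; host names and credentials are made up.
#
#   plugins:
#     backbones:
#       rabbitmq:
#         host: localhost
#         port: 5672
#         username: guest
#         password: guest
#         vhost: /
#         exchange_name: threatbus
#         queue:
#           name_suffix: my-host
#           max_items: 0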
@retry(delay=5)
@threatbus.backbone
def subscribe(topic: str, q: JoinableQueue):
"""
Threat Bus' subscribe hook. Used to register new app-queues for certain
topics.
"""
global logger, subscriptions, lock
logger.info(f"Adding subscription to: {topic}")
lock.acquire()
subscriptions[topic].add(q)
lock.release()
@threatbus.backbone
def unsubscribe(topic: str, q: JoinableQueue):
"""
Threat Bus' unsubscribe hook. Used to deregister app-queues from certain
topics.
"""
global logger, subscriptions, lock
logger.info(f"Removing subscription from: {topic}")
lock.acquire()
if q in subscriptions[topic]:
subscriptions[topic].remove(q)
lock.release()
@threatbus.backbone
def run(config: DynaBox, logging: DynaBox, inq: JoinableQueue):
global subscriptions, lock, logger, workers
assert plugin_name in config, f"Cannot find configuration for {plugin_name} plugin"
logger = threatbus.logger.setup(logging, __name__)
config = config[plugin_name]
credentials = pika.PlainCredentials(config.username, config.password)
conn_params = pika.ConnectionParameters(
config.host, config.port, config.vhost, credentials
)
workers.append(
RabbitMQConsumer(
conn_params, config.exchange_name, config.queue, provision, logger
)
)
workers.append(RabbitMQPublisher(conn_params, config.exchange_name, inq, logger))
for w in workers:
w.start()
logger.info("RabbitMQ backbone started.")
@threatbus.backbone
def stop():
global logger, workers
for w in workers:
if not w.is_alive():
continue
w.stop()
w.join()
logger.info("RabbitMQ backbone stopped")
| 29.934211
| 87
| 0.648132
|
426c07ca89f3365a8feb3d5f990d722985c3771e
| 11,801
|
py
|
Python
|
src/python/turicreate/test/test_json.py
|
Bpowers4/turicreate
|
73dad213cc1c4f74337b905baea2b3a1e5a0266c
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/turicreate/test/test_json.py
|
Bpowers4/turicreate
|
73dad213cc1c4f74337b905baea2b3a1e5a0266c
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/turicreate/test/test_json.py
|
Bpowers4/turicreate
|
73dad213cc1c4f74337b905baea2b3a1e5a0266c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
# This file tests invertibility (serializing to/from) the "serializable" format
# of variant_type (produced by extensions.json). This extension results in a
# naively-JSON-serializable flexible_type that should retain all necessary
# information to be rehydrated into the original variant_type.
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import array
import datetime
import json # Python built-in JSON module
import math
import os
import pandas
import pytest
import pytz
import six
import string
import sys
import unittest
import tempfile
from . import util
from .. import _json # turicreate._json
from ..data_structures.sarray import SArray
from ..data_structures.sframe import SFrame
from ..data_structures.sgraph import SGraph, Vertex, Edge
if sys.version_info.major == 3:
long = int
class image_info:
def __init__(self, url):
self.url = url
if "png" in url:
self.format = "PNG"
elif "jpg" in url:
self.format = "JPG"
if "grey" in url:
self.channels = 1
else:
self.channels = 3
current_file_dir = os.path.dirname(os.path.realpath(__file__))
image_urls = [
current_file_dir + x
for x in [
"/images/nested/sample_grey.jpg",
"/images/nested/sample_grey.png",
"/images/sample.jpg",
"/images/sample.png",
]
]
image_info = [image_info(u) for u in image_urls]
_SFrameComparer = util.SFrameComparer()
class JSONTest(unittest.TestCase):
def _assertEqual(self, x, y):
if type(x) in [long, int]:
self.assertTrue(type(y) in [long, int])
elif isinstance(x, six.string_types):
self.assertTrue(isinstance(y, six.string_types))
else:
self.assertEqual(type(x), type(y))
if isinstance(x, six.string_types):
self.assertEqual(str(x), str(y))
elif isinstance(x, SArray):
_SFrameComparer._assert_sarray_equal(x, y)
elif isinstance(x, SFrame):
_SFrameComparer._assert_sframe_equal(x, y)
elif isinstance(x, SGraph):
_SFrameComparer._assert_sgraph_equal(x, y)
elif isinstance(x, dict):
for (k1, v1), (k2, v2) in zip(sorted(x.items()), sorted(y.items())):
self._assertEqual(k1, k2)
self._assertEqual(v1, v2)
elif isinstance(x, list):
for v1, v2 in zip(x, y):
self._assertEqual(v1, v2)
else:
self.assertEqual(x, y)
def _run_test_case(self, value):
# test that JSON serialization is invertible with respect to both
# value and type.
(data, schema) = _json.to_serializable(value)
# ensure that resulting value is actually naively serializable
data = json.loads(json.dumps(data, allow_nan=False))
schema = json.loads(json.dumps(schema, allow_nan=False))
# print("----------------------------------")
# print("Value: %s" % value)
# print("Serializable Data: %s" % data)
# print("Serializable Schema: %s" % schema)
result = _json.from_serializable(data, schema)
# print("Deserialized Result: %s" % result)
# print("----------------------------------")
self._assertEqual(result, value)
# test that JSON serialization gives expected result
serialized = _json.dumps(value)
deserialized = _json.loads(serialized)
self._assertEqual(deserialized, value)
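    # Illustrative only: a minimal sketch (with a hypothetical value) of the
    # round trip exercised above.
    #   data, schema = _json.to_serializable({"x": 1})
    #   assert _json.from_serializable(data, schema) == {"x": 1}
    #   assert _json.loads(_json.dumps({"x": 1})) == {"x": 1}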
@unittest.skipIf(sys.platform == "win32", "Windows long issue")
def test_int(self):
[
self._run_test_case(value)
for value in [
0,
1,
-2147483650,
-2147483649, # boundary of accurate representation in JS 64-bit float
2147483648, # boundary of accurate representation in JS 64-bit float
2147483649,
]
]
def test_float(self):
[
self._run_test_case(value)
for value in [-1.1, -1.0, 0.0, 1.0, 1.1, float("-inf"), float("inf"),]
]
self.assertTrue(
math.isnan(_json.from_serializable(*_json.to_serializable(float("nan"))))
)
def test_string_to_json(self):
[self._run_test_case(value) for value in ["hello", "a'b", 'a"b', "ɖɞɫɷ",]]
def test_vec_to_json(self):
[
self._run_test_case(value)
for value in [
array.array("d"),
array.array("d", [1.5]),
array.array("d", [2.1, 2.5, 3.1]),
array.array("d", [float("-inf"), float("inf")]),
]
]
def test_list_to_json(self):
# TODO -- we can't test lists of numbers, due to
# Python<->flexible_type not being reversible for lists of numbers.
# if `list` of `int` goes into C++, the flexible_type representation
# becomes flex_vec (vector<flex_float>). This is a lossy representation.
# known issue, can't resolve here.
[
self._run_test_case(value)
for value in [
[],
["hello", "world"],
["hello", 3, None],
[3.14159, None],
[{}, {"x": 1, "y": 2}],
["hello", float("-inf"), float("inf")],
]
]
def test_dict_to_json(self):
[self._run_test_case(value) for value in [{}, {"x": 1, "y": 2},]]
def test_date_time_to_json(self):
d = datetime.datetime(year=2016, month=3, day=5)
[
self._run_test_case(value)
for value in [
d,
pytz.utc.localize(d),
pytz.timezone("US/Arizona").localize(d),
]
]
def test_image_to_json(self):
from .. import Image
[
self._run_test_case(value)
for value in [
Image(path=item.url, format=item.format) for item in image_info
]
]
def test_sarray_to_json(self):
from .. import Image
d = datetime.datetime(year=2016, month=3, day=5)
[
self._run_test_case(value)
for value in [
SArray(),
SArray([1, 2, 3]),
SArray([1.0, 2.0, 3.0]),
SArray([None, 3, None]),
SArray(["hello", "world"]),
SArray(array.array("d", [2.1, 2.5, 3.1])),
SArray(
[["hello", None, "world"], ["hello", 3, None], [3.14159, None],]
),
SArray([{"x": 1, "y": 2}, {"x": 5, "z": 3},]),
SArray(
[d, pytz.utc.localize(d), pytz.timezone("US/Arizona").localize(d),]
),
SArray(
[Image(path=item.url, format=item.format) for item in image_info]
),
]
]
def test_sframe_to_json(self):
[
self._run_test_case(value)
for value in [
SFrame(),
SFrame({"foo": [1, 2, 3, 4], "bar": [None, "Hello", None, "World"]}),
]
]
def test_sgraph_to_json(self):
sg = SGraph()
self._run_test_case(sg)
sg = sg.add_vertices([Vertex(x) for x in [1, 2, 3, 4]])
sg = sg.add_edges([Edge(x, x + 1) for x in [1, 2, 3]])
self._run_test_case(sg)
def test_nested_to_json(self):
# not tested in the cases above: nested data, nested schema
# (but all flexible_type compatible)
[
self._run_test_case(value)
for value in [
{
"foo": ["a", "b", "c"],
"bar": array.array("d", [0.0, float("inf"), float("-inf")]),
},
[["a", "b", "c"], array.array("d", [0.0, float("inf"), float("-inf")])],
{
"baz": {
"foo": ["a", "b", "c"],
"bar": array.array("d", [0.0, float("inf"), float("-inf")]),
},
"qux": [
["a", "b", "c"],
array.array("d", [0.0, float("inf"), float("-inf")]),
],
},
]
]
def test_variant_to_json(self):
# not tested in the cases above: variant_type other than SFrame-like
# but containing SFrame-like (so cannot be a flexible_type)
sf = SFrame({"col1": [1, 2], "col2": ["hello", "world"]})
sa = SArray([5.0, 6.0, 7.0])
[self._run_test_case(value) for value in [{"foo": sf, "bar": sa}, [sf, sa],]]
def test_malformed_json(self):
out = """
[
{
"text": "["I", "have", "an", "atlas"]",
"label": ["NONE", "NONE", "NONE", "NONE"]
},
{
"text": ["These", "are", "my", "dogs"],
"label": ["NONE", "NONE", "NONE", "PLN"]
},
{
"text": ["The", "sheep", "are", "fluffy"],
"label": ["NONE","PLN","NONE","NONE"]
},
{
"text": ["Billiards", "is", "my", "favourite", "game"],
"label": ["NONE", "NONE", "NONE", "NONE", "NONE"]
},
{
"text": ["I", "went", "to", "five", "sessions", "today"],
"label": ["NONE", "NONE", "NONE", "NONE", "PLN", "NONE"]
}
]
"""
with tempfile.NamedTemporaryFile("w") as f:
f.write(out)
f.flush()
self.assertRaises(RuntimeError, SArray.read_json, f.name)
self.assertRaises(RuntimeError, SFrame.read_json, f.name)
def test_nonexistant_json(self):
self.assertRaises(IOError, SArray.read_json, "/nonexistant.json")
self.assertRaises(IOError, SFrame.read_json, "/nonexistant.json")
def test_strange_128_char_corner_case(self):
json_text = """
{"foo":[{"bar":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. In eget odio velit. Suspendisse potenti. Vivamus a urna feugiat nullam."}]}
"""
with tempfile.NamedTemporaryFile("w") as f:
f.write(json_text)
f.flush()
df = pandas.read_json(f.name, lines=True)
sf_actual = SFrame.read_json(f.name, orient="lines")
sf_expected = SFrame(df)
_SFrameComparer._assert_sframe_equal(sf_expected, sf_actual)
def test_true_false_substitutions(self):
expecteda = [["a", "b", "c"], ["a", "b", "c"]]
expectedb = [["d", "false", "e", 0, "true", 1, "a"], ["d", "e", "f"]]
records_json_file = """
[{"a" : ["a", "b", "c"],
"b" : ["d", "false", "e", false, "true", true, "a"]},
{"a" : ["a", "b", "c"],
"b" : ["d", "e", "f"]}]
"""
lines_json_file = """
{"a" : ["a", "b", "c"], "b" : ["d", "false", "e", false, "true", true, "a"]}
{"a" : ["a", "b", "c"], "b" : ["d", "e", "f"]}
"""
with tempfile.NamedTemporaryFile("w") as f:
f.write(records_json_file)
f.flush()
records = SFrame.read_json(f.name, orient="records")
self.assertEqual(list(records["a"]), expecteda)
self.assertEqual(list(records["b"]), expectedb)
with tempfile.NamedTemporaryFile("w") as f:
f.write(lines_json_file)
f.flush()
lines = SFrame.read_json(f.name, orient="lines")
self.assertEqual(list(lines["a"]), expecteda)
self.assertEqual(list(lines["b"]), expectedb)
| 33.813754
| 148
| 0.528769
|
e690d27d49143abb2bee86e73f57c1a70085d47e
| 1,710
|
py
|
Python
|
corehq/apps/hqadmin/pillow_settings.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T13:10:01.000Z
|
2020-05-05T13:10:01.000Z
|
corehq/apps/hqadmin/pillow_settings.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2019-12-09T14:00:14.000Z
|
2019-12-09T14:00:14.000Z
|
corehq/apps/hqadmin/pillow_settings.py
|
MaciejChoromanski/commcare-hq
|
fd7f65362d56d73b75a2c20d2afeabbc70876867
|
[
"BSD-3-Clause"
] | 5
|
2015-11-30T13:12:45.000Z
|
2019-07-01T19:27:07.000Z
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from copy import deepcopy
import json
def _get_pillow_configs_from_settings_dict(pillows_by_group):
# this sucks, but not sure there's a better way to make it available to fabric
from manage import init_hq_python_path
init_hq_python_path()
from pillowtop.utils import get_pillow_configs_from_settings_dict
return get_pillow_configs_from_settings_dict(pillows_by_group)
def get_pillows_for_env(pillow_env_configs, pillows_by_group=None):
"""
    :param pillow_env_configs: {pillow_name: {params to pass to supervisor generator}}
"""
if pillows_by_group is None:
from django.conf import settings
pillows_by_group = settings.PILLOWTOPS
return _get_pillows_for_env(pillow_env_configs, pillows_by_group)
def _get_pillows_for_env(pillow_env_configs, pillows_by_group):
ret = []
pillow_names = set(pillow_env_configs)
pillow_configs = _get_pillow_configs_from_settings_dict(pillows_by_group)
pillows_for_env = [config for config in pillow_configs if config.name in pillow_names]
for config in pillows_for_env:
final_config = deepcopy(config)
final_config.params.update(pillow_env_configs[config.name])
ret.append(final_config)
return ret
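# Illustrative only: a hypothetical `pillow_env_configs` mapping as described
# in the docstring above; the pillow name and parameter are made up.
#   {'DefaultChangeFeedPillow': {'num_processes': 2}}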
def test_pillow_settings(env_name, pillows_by_group):
from fab.fabfile import load_env
from fab.utils import get_pillow_env_config
load_env(env_name)
pillows = get_pillows_for_env(get_pillow_env_config(env_name), pillows_by_group=pillows_by_group)
print("Configured Pillows")
print(json.dumps(pillows, sort_keys=True, indent=2))
| 36.382979
| 101
| 0.788304
|
331957b3c926360a94daffdfa8a349fbde12e0f7
| 8,941
|
py
|
Python
|
Clean_StreetNames.py
|
EricSamsonCarto/Clean_StreetName_ArcpyTool
|
ec12b568baee65fab6583ee2d4d9482666978e1a
|
[
"CC0-1.0"
] | 1
|
2020-09-22T18:11:28.000Z
|
2020-09-22T18:11:28.000Z
|
Clean_StreetNames.py
|
EricSamsonCarto/Clean_StreetName_ArcpyTool
|
ec12b568baee65fab6583ee2d4d9482666978e1a
|
[
"CC0-1.0"
] | null | null | null |
Clean_StreetNames.py
|
EricSamsonCarto/Clean_StreetName_ArcpyTool
|
ec12b568baee65fab6583ee2d4d9482666978e1a
|
[
"CC0-1.0"
] | null | null | null |
import arcgis
import pandas as pd
import os
import arcpy
"""--------------------------------------------------------------------------------
Script Name: Clean Street Names
Description: This script fixes and cleans a layer with a "FullName" street
field. A "FullName" street field includes the street name and street prefix. A "FLAG"
field is created in the output layer that marks records whose street name has
one word or fewer, or five words or more. This field can be used as
an inspection field for data integrity.
Examples:
INPUT OUTPUT
---------------------------------------------
walnut blv. ---> WALNUT BLVD
MaIn Street. ---> MAIN ST
Silver road east ---> E SILVER RD
89 Highway (Eastbound) ---> EB 89 HWY
knoll creek ---> KNOLL CR
SOUTH / richmond av ---> S RICHMOND AVE
A CSV file named "NameABBRV.csv" is needed in order to run the script
and must be saved within the same directory as the script. It
contains two columns: one with full street prefixes ("Streetname") and one with their abbreviations ("Abbrv").
Created By: Eric Samson.
Date: 3/25/2019.
------------------------------------------------------------------------------------"""
arcpy.env.overwriteOutput = True
#inFC IS PASSED IN AS A PARAMETER; fcName IS ITS BASENAME; THE INPUT FIELD AND OUTPUT FC ALSO COME FROM THE USER
inFC = arcpy.GetParameterAsText(0)
fcName = os.path.basename(inFC)
inField = arcpy.GetParameterAsText(1)
outFC = arcpy.GetParameterAsText(2)
fc_df = pd.DataFrame.spatial.from_featureclass(fcName)
fc_df.head()
#-------------------------------------------------------------------------------------
#UPPERCASE, REMOVE LEADING AND TRAILING WHITE SPACES, REMOVE EXTRA SPACES
fc_df[inField] = fc_df[inField].str.upper()
fc_df[inField] = fc_df[inField].str.strip()
fc_df[inField] = fc_df[inField].replace('\s+', ' ', regex=True)
#REMOVE SPECIAL CHARECTERS
SpecialCHAR = ['.', '&', ')', '(', '/', '-','{', '}', '*', '$', '%', '^', '@', '!', '_', '~', ':', '?', ']', '[', '=']
for x in SpecialCHAR:
    fc_df[inField] = fc_df[inField].str.replace(x, '', regex=False)  # literal (non-regex) replacement
#-------------------------------------------------------------------------------------
#REPLACE DIRECTIONS WITH ABBREVIATIONS
# REPLACE HEADINGS
HeadingsFULL = ['EAST', 'WEST', 'SOUTH', 'NORTH', 'EASTBOUND', 'WESTBOUND', 'SOUTHBOUND', 'NORTHBOUND']
HeadingsABRV = ['E', 'W', 'S', 'N', 'EB', 'WB', 'SB', 'NB']
#------REPLACE ABBRV AND MOVE TO FRONT!
fc_df[inField] = fc_df[inField].str.strip()
fc_df[inField] = fc_df[inField].replace('\s+', ' ', regex=True)
#Copy Over StreetName
fc_df['StreetName_Copy'] = fc_df[inField]
#LOOP THROUGH, LOOK FOR STRING WITH MORE THAN 2 WORDS
def find_LargeStrings(x):
if x is not None and len(x.split()) > 2:
return x
if x is not None and len(x.split()) <= 2:
return ''
fc_df['StreetName_Copy'] = fc_df['StreetName_Copy'].apply(find_LargeStrings)
#REPLACE HEADINGS THAT ARE AT THE END OF THE STRING
for x,y in zip(HeadingsFULL, HeadingsABRV):
fc_df['StreetName_Copy'] = fc_df['StreetName_Copy'].str.replace(rf'\b{x}\b$', y, regex=True)
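# Illustrative only (hypothetical value): the rf'\b{x}\b$' pattern replaces a
# full-word heading only when it is the last word, e.g.
#   'SILVER ROAD EAST' -> 'SILVER ROAD E'
# while 'OLD EASTWOOD DR' is left untouched because of the word boundaries.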
#MOVE HEADING TO NEW COLUMN
fc_df['Headings'] = fc_df['StreetName_Copy'].str.split().str[-1]
#LOOP THROUGH, LOOK FOR ABBREVIATIONS
def fix_direction(x):
if x in HeadingsABRV:
return x
else:
return ''
fc_df['Headings_Fixed'] = fc_df['Headings'].apply(fix_direction)
#If a heading has been fixed, drop that from the streetname
fc_df.loc[fc_df['Headings_Fixed'] != '', 'StreetName_Copy'] = fc_df.StreetName_Copy.str.rsplit(' ',1).str[0]
fc_df = fc_df.drop(['Headings'], axis=1)
#Repeat REPLACE of last word for duplicate headings:
for x,y in zip(HeadingsFULL, HeadingsABRV):
fc_df['StreetName_Copy'] = fc_df['StreetName_Copy'].str.replace(rf'\b{x}\b$', y, regex=True)
#Move last word to new column
fc_df['Headings2'] = fc_df['StreetName_Copy'].str.split().str[-1]
#LOOP THROUGH, LOOK FOR ABBREVIATIONS
def fix_direction(x):
if x in HeadingsABRV:
return x
else:
return ''
fc_df['Headings_Fixed2'] = fc_df['Headings2'].apply(fix_direction)
fc_df = fc_df.drop(['Headings2'], axis=1)
fc_df.loc[fc_df['Headings_Fixed2'] != '', 'StreetName_Copy'] = fc_df.StreetName_Copy.str.rsplit(' ',1).str[0]
fc_df['Headings_Final'] = fc_df['Headings_Fixed2'] + ' ' + fc_df['Headings_Fixed']
#Drop Left Over Fields
fc_df = fc_df.drop(['Headings_Fixed'], axis=1)
fc_df = fc_df.drop(['Headings_Fixed2'], axis=1)
#Look for large strings greater than 2 again
fc_df['StreetName_Clean'] = fc_df['StreetName_Copy'].apply(find_LargeStrings)
#REPLACE FIRST Word WITH ABBRV if it's a heading
for x,y in zip(HeadingsFULL, HeadingsABRV):
fc_df['StreetName_Clean'] = fc_df['StreetName_Clean'].str.replace(rf'^\b{x}\b', y, regex=True)
#CLEAN FIELDS
fc_df['StreetName_Clean'] = fc_df['StreetName_Clean'].str.strip()
fc_df.StreetName_Clean = fc_df.StreetName_Clean.replace('\s+', ' ', regex=True)
fc_df['StreetName_Copy'] = fc_df['StreetName_Copy'].str.strip()
fc_df.StreetName_Copy = fc_df.StreetName_Copy.replace('\s+', ' ', regex=True)
#WHERE StreetName_Clean IS EMPTY, FILL IT FROM StreetName_Copy
fc_df.loc[fc_df['StreetName_Clean'] == '', 'StreetName_Clean'] = fc_df['StreetName_Copy']
#ADD DIRECTIONS TO THE FRONT OF THE STREET NAME
fc_df['StreetName_Clean'] = fc_df['Headings_Final'] + ' ' + fc_df['StreetName_Clean']
#WHERE StreetName_Clean IS NULL, REPLACE WITH THE STREETNAME:
fc_df['StreetName_Clean'] = fc_df['StreetName_Clean'].str.strip()
fc_df.StreetName_Clean = fc_df.StreetName_Clean.replace('\s+', ' ', regex=True)
fc_df.loc[fc_df['StreetName_Clean'] == '', 'StreetName_Clean'] = fc_df[inField]
#Make new StreetName column
fc_df[inField] = fc_df['StreetName_Clean']
#Drop Left Over Fields
fc_df = fc_df.drop(['Headings_Final'], axis=1)
fc_df = fc_df.drop(['StreetName_Clean'], axis=1)
fc_df = fc_df.drop(['StreetName_Copy'], axis=1)
#remove WhiteSpace
fc_df[inField] = fc_df[inField].str.strip()
fc_df[inField] = fc_df[inField].replace('\s+', ' ', regex=True)
#-----------------------------------------------------------
#Replace street name prefix's with abbrv's
#READ CSV OF STREET PREFIX's and ABBREVIATIONS
config_file = os.path.join(os.path.dirname(__file__), "NameABBRV.csv")
csv_df = pd.read_csv(config_file)
csv_df.dropna(inplace = True)
csv_df['Streetname'] = csv_df['Streetname'].str.strip()
csv_df['Abbrv'] = csv_df['Abbrv'].str.strip()
#STREET LISTS
streetprefix_list = csv_df['Streetname'].tolist()
Abbrv_list = csv_df['Abbrv'].tolist()
#OTHERS:
Others = ['AV','BLVE','BL','CI','PARKWY','EX','PY','TE','PW','PK','BLV']
Others_re = ['AVE','BLVD','BLVD','CIR','PKY','EXPY','PKWY','TER','PKWY','PARK','BLVD']
#MAKE NEW FIELD WITH ONLY LAST ELEMENT
fc_df['ABBRV'] = fc_df[inField].str.split().str[-1]
#LOOP THROUGH, LOOK FOR ABBREVIATIONS
def fix_direction(x):
if x in streetprefix_list or x in Others:
return x
else:
return ''
#CREATE A FIELD WITH ONLY ROAD PREFIXES, OTHERWISE EMPTY STRINGS
fc_df['ABBRV_fixed'] = fc_df['ABBRV'].apply(fix_direction)
#REPLACE PREFIX WITH ABBRV
for x,y in zip(streetprefix_list, Abbrv_list):
fc_df['ABBRV_fixed'] = fc_df['ABBRV_fixed'].str.replace(rf'\b{x}\b$', y, regex=True)
for x,y in zip(Others, Others_re):
fc_df['ABBRV_fixed'] = fc_df['ABBRV_fixed'].str.replace(rf'\b{x}\b$', y, regex=True)
#LOCATE WHERE THERE ARE Prefix's, REMOVE Prefix FROM STREETNAME COLUMN
#Since we just moved it above
fc_df.loc[fc_df['ABBRV_fixed'] != '', inField] = fc_df[inField].str.rsplit(' ',1).str[0]
#ADD THE abbrv back TO THE END OF FIELD
fc_df[inField] = fc_df[inField] + ' ' + fc_df['ABBRV_fixed']
fc_df[inField] = fc_df[inField].str.strip()
fc_df[inField] = fc_df[inField].replace('\s+', ' ', regex=True)
#Drop remaining fields
fc_df = fc_df.drop(['ABBRV_fixed'], axis=1)
fc_df = fc_df.drop(['ABBRV'], axis=1)
#-----------------------------------------------------------
#FLAG ROWS WITH ONE ELEMENT OR FEWER (NO PREFIX), OR FIVE OR MORE
#CREATES NEW FIELD CALLED 'FLAG'
fc_df['FLAG'] = fc_df[inField]
#COUNT ELEMENTS IN EACH ROW OF STREET NAME, OVERWRITE FLAG ROWS WITH ELEMENT COUNT
fc_df['FLAG'] = fc_df[inField].str.split().str.len()
#SET UP FUNCTION TO ITERATE THROUGH FLAGS, LOOKING FOR WHERE STREETNAME HAS 1 ELEMENT OR LESS, OR 5 ELEMENTS OR MORE
def check_elements(x):
if x <= 1 or x >= 5:
return 'FLAG'
else:
return ''
fc_df['FLAG'] = fc_df['FLAG'].apply(check_elements)
#-----------------------------------------------------------
#BACK TO FEATURE LAYER, DF TO OUT FC
outFC_sp = fc_df.spatial.to_featureclass(os.path.join(outFC))
#PRINT TO MAP
outFC_sp
| 36.493878
| 119
| 0.62465
|
4aecefe825764240c385197d5ff10c3471109fb2
| 14,236
|
py
|
Python
|
tablemanager/forms.py
|
parksandwildlife/borgcollector
|
dab9464f2e58c7dbc039b4805bb894b168547938
|
[
"BSD-3-Clause"
] | 2
|
2016-01-20T02:26:06.000Z
|
2016-02-16T02:47:24.000Z
|
tablemanager/forms.py
|
parksandwildlife/borgcollector
|
dab9464f2e58c7dbc039b4805bb894b168547938
|
[
"BSD-3-Clause"
] | 4
|
2020-02-11T23:40:10.000Z
|
2021-09-22T04:27:50.000Z
|
tablemanager/forms.py
|
dbca-wa/borgcollector
|
dab9464f2e58c7dbc039b4805bb894b168547938
|
[
"BSD-3-Clause"
] | 4
|
2016-01-12T02:10:14.000Z
|
2017-11-09T13:53:16.000Z
|
import json
from django import forms
from django.core.exceptions import ObjectDoesNotExist,ValidationError
from django.forms.widgets import HiddenInput,TextInput
from django.contrib.admin.widgets import RelatedFieldWidgetWrapper
from tablemanager.models import (Normalise,NormalTable,Normalise_NormalTable,Publish,
Publish_NormalTable,ForeignTable,Input,Workspace,DataSource,
PublishChannel,DatasourceType)
from borg_utils.form_fields import GroupedModelChoiceField,CachedModelChoiceField
from borg_utils.widgets import MultiWidgetLayout
from borg_utils.form_fields import GeoserverSettingForm,MetaTilingFactorField,GridSetField,BorgSelect
from borg_utils.forms import BorgModelForm
from django.template import Context, Template
class ForeignTableForm(BorgModelForm):
"""
A form for ForeignTable Model
"""
def __init__(self, *args, **kwargs):
super(ForeignTableForm, self).__init__(*args, **kwargs)
#remove the empty label
#self.fields['server'].empty_label=None
if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
self.fields['name'].widget.attrs['readonly'] = True
#remove the "+" icon from the html page because this field is readonly
self.fields['server'].widget = self.fields['server'].widget.widget
self.fields['server'].widget.attrs['readonly'] = True
class Meta:
model = ForeignTable
fields = "__all__"
widgets = {
'server': BorgSelect(),
}
class DataSourceForm(BorgModelForm):
"""
A form for DataSource Model
"""
CHANGE_TYPE = 100
def __init__(self, *args, **kwargs):
super(DataSourceForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
self.fields['name'].widget.attrs['readonly'] = True
self.fields['type'].widget.attrs['readonly'] = True
def get_mode(self,data):
if data and "_change_type" in data:
return (DataSourceForm.CHANGE_TYPE,"change_type",True,False,('name','type'))
return super(DataSourceForm,self).get_mode(data)
def change_type(self):
if self.instance.type == DatasourceType.DATABASE:
self.data['sql'] = "CREATE SERVER {{self.name}} FOREIGN DATA WRAPPER oracle_fdw OPTIONS (dbserver '//<hostname>/<sid>');"
else:
self.data['sql'] = ""
class Meta:
model = DataSource
fields = "__all__"
widgets = {
'type': BorgSelect(attrs={"onChange":"django.jQuery('#datasource_form').append(\"<input type='hidden' name='_change_type' value=''>\");django.jQuery('#datasource_form').submit()"}),
'description': forms.TextInput(attrs={"style":"width:95%"})
}
class InputForm(BorgModelForm):
"""
A form for Input Model
"""
INSERT_FIELDS = 100
CHANGE_DATA_SOURCE = 101
CHANGE_FOREIGN_TABLE = 102
foreign_table = CachedModelChoiceField(queryset=ForeignTable.objects.all(),label_func=lambda table:table.name,required=False,choice_family="foreigntable",choice_name="foreigntable_options",
widget=BorgSelect(attrs={"onChange":"$('#input_form').append(\"<input type='hidden' name='_change_foreign_table' value=''>\"); $('#input_form').submit()"}))
def __init__(self, *args, **kwargs):
super(InputForm, self).__init__(*args, **kwargs)
#remove the "+" icon from the html page because it will trigger the onchange event and cause a recursive submit of the html form to the server
self.fields['data_source'].widget = self.fields['data_source'].widget.widget
if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
self.fields['name'].widget.attrs['readonly'] = True
self.fields['data_source'].widget.attrs['readonly'] = True
self.fields['foreign_table'].widget.attrs['readonly'] = True
def get_mode(self,data):
if data and "_insert_fields" in data:
return (InputForm.INSERT_FIELDS,"insert_fields",True,False,None)
elif data and "_change_data_source" in data:
return (InputForm.CHANGE_DATA_SOURCE,"change_data_source",True,False,('name','data_source'))
elif data and "_change_foreign_table" in data:
return (InputForm.CHANGE_DATA_SOURCE,"change_foreign_table",True,False,('name','data_source','foreign_table'))
return super(InputForm,self).get_mode(data)
def insert_fields(self):
self.data['source'] = self.instance.source
self.fields['foreign_table'].queryset = ForeignTable.objects.filter(server=self.instance.data_source)
self.fields['foreign_table'].choice_name = "foreigntable_options_{}".format(self.instance.data_source.name)
self.fields['foreign_table'].widget.choices = self.fields['foreign_table'].choices
def change_data_source(self):
if not hasattr(self.instance,"data_source"):
self.data['source'] = ""
elif self.instance.data_source.type == DatasourceType.FILE_SYSTEM:
self.data['source'] = self.instance.data_source.vrt
elif self.instance.data_source.type == DatasourceType.DATABASE:
self.fields['foreign_table'].queryset = ForeignTable.objects.filter(server=self.instance.data_source)
self.fields['foreign_table'].choice_name = "foreigntable_options_{}".format(self.instance.data_source.name)
self.fields['foreign_table'].widget.choices = self.fields['foreign_table'].choices
self.data['source'] = ""
else:
self.data['source'] = ""
def change_foreign_table(self):
self.data['source'] = str(Template(self.instance.data_source.vrt).render(Context({'self':self.instance,'db':Input.DB_TEMPLATE_CONTEXT})))
self.fields['foreign_table'].queryset = ForeignTable.objects.filter(server=self.instance.data_source)
self.fields['foreign_table'].choice_name = "foreigntable_options_{}".format(self.instance.data_source.name)
self.fields['foreign_table'].widget.choices = self.fields['foreign_table'].choices
class Meta:
model = Input
fields = "__all__"
widgets = {
'data_source': BorgSelect(attrs={"onChange":"$('#input_form').append(\"<input type='hidden' name='_change_data_source' value=''>\"); $('#input_form').submit();"}),
}
class NormalTableForm(BorgModelForm):
"""
A form for NormalTable Model
"""
def __init__(self, *args, **kwargs):
super(NormalTableForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
self.fields['name'].widget.attrs['readonly'] = True
class Meta:
model = NormalTable
fields = "__all__"
class PublishChannelForm(BorgModelForm):
"""
A form for PublishChannel Model
"""
def __init__(self, *args, **kwargs):
super(PublishChannelForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
self.fields['name'].widget.attrs['readonly'] = True
class Meta:
model = PublishChannel
fields = "__all__"
class WorkspaceForm(BorgModelForm):
"""
A form for Workspace Model
"""
def __init__(self, *args, **kwargs):
super(WorkspaceForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
self.fields['name'].widget.attrs['readonly'] = True
self.fields['publish_channel'].widget = self.fields['publish_channel'].widget.widget
self.fields['publish_channel'].widget.attrs['readonly'] = True
class Meta:
model = Workspace
fields = "__all__"
widgets = {
'publish_channel': BorgSelect(),
}
class NormaliseForm(BorgModelForm):
"""
A form for Normalise Model
"""
input_table = GroupedModelChoiceField('data_source',queryset=Input.objects.all(),required=True,choice_family="input",choice_name="input_options")
dependents = forms.ModelMultipleChoiceField(queryset=NormalTable.objects.all(),required=False)
output_table = forms.ModelChoiceField(queryset=NormalTable.objects.all(),required=False,widget=BorgSelect())
def __init__(self, *args, **kwargs):
kwargs['initial']=kwargs.get('initial',{})
if 'instance' in kwargs and kwargs['instance']:
try:
kwargs['initial']['output_table']=kwargs['instance'].normaltable
except ObjectDoesNotExist:
pass
dependents = []
for relation in (kwargs['instance'].relations):
if relation:
for normal_table in relation.normal_tables:
if normal_table: dependents.append(normal_table)
kwargs['initial']['dependents'] = dependents
super(NormaliseForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
self.fields['name'].widget.attrs['readonly'] = True
self.fields['output_table'].widget.attrs['readonly'] = True
def _post_clean(self):
super(NormaliseForm,self)._post_clean()
if self.errors:
return
if 'output_table' in self.cleaned_data:
self.instance.normal_table = self.cleaned_data['output_table']
else:
self.instance.normal_table = None
if 'dependents' in self.cleaned_data:
sorted_dependents = self.cleaned_data['dependents'].order_by('pk')
else:
sorted_dependents = []
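#walk the instance's relation slots and their normal_table slots in order,
#creating missing relations while sorted dependents remain and clearing
#any slots left over once the dependents are exhausted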
pos = 0
normal_table_pos = 0
relation_index = 0
length = len(sorted_dependents)
for relation in (self.instance.relations):
normal_table_pos = 0
if pos < length:
if relation is None:
relation = Normalise_NormalTable()
self.instance.set_relation(relation_index,relation)
if relation is not None:
for normal_table in relation.normal_tables:
if pos < length:
relation.set_normal_table(normal_table_pos, sorted_dependents[pos])
elif relation:
relation.set_normal_table(normal_table_pos, None)
pos += 1
normal_table_pos += 1
relation_index += 1
class Meta:
model = Normalise
fields = ('name','input_table','dependents','output_table','sql')
class PublishForm(GeoserverSettingForm,BorgModelForm):
"""
A form for normal table's Publish Model
"""
create_cache_layer = forms.BooleanField(required=False,label="create_cache_layer",initial=True)
create_cache_layer.setting_type = "geoserver_setting"
server_cache_expire = forms.IntegerField(label="server_cache_expire",min_value=0,required=False,initial=0,help_text="Expire server cache after n seconds (set to 0 to use source setting)")
server_cache_expire.setting_type = "geoserver_setting"
client_cache_expire = forms.IntegerField(label="client_cache_expire",min_value=0,required=False,initial=0,help_text="Expire client cache after n seconds (set to 0 to use source setting)")
client_cache_expire.setting_type = "geoserver_setting"
workspace = GroupedModelChoiceField('publish_channel',queryset=Workspace.objects.all(),required=True,choice_family="workspace",choice_name="workspace_choices",widget=BorgSelect())
input_table = GroupedModelChoiceField('data_source',queryset=Input.objects.all(),required=False,choice_family="input",choice_name="input_options")
dependents = forms.ModelMultipleChoiceField(queryset=NormalTable.objects.all(),required=False)
def __init__(self, *args, **kwargs):
kwargs['initial']=kwargs.get('initial',{})
self.get_setting_from_model(*args,**kwargs)
if 'instance' in kwargs and kwargs['instance']:
#populate the dependents field value from table data
dependents = []
for relation in (kwargs['instance'].relations):
if relation:
for normal_table in relation.normal_tables:
if normal_table: dependents.append(normal_table)
kwargs['initial']['dependents'] = dependents
super(PublishForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
self.fields['name'].widget.attrs['readonly'] = True
self.fields['workspace'].widget.attrs['readonly'] = True
def _post_clean(self):
super(PublishForm,self)._post_clean()
if self.errors:
return
#populate the value of the relation columns
if 'dependents' in self.cleaned_data:
sorted_dependents = self.cleaned_data['dependents'].order_by('pk')
else:
sorted_dependents = []
pos = 0
normal_table_pos = 0
relation_index = 0
length = len(sorted_dependents)
for relation in (self.instance.relations):
normal_table_pos = 0
if pos < length:
if relation is None:
relation = Publish_NormalTable()
self.instance.set_relation(relation_index,relation)
if relation is not None:
for normal_table in relation.normal_tables:
if pos < length:
relation.set_normal_table(normal_table_pos, sorted_dependents[pos])
elif relation:
relation.set_normal_table(normal_table_pos, None)
pos += 1
normal_table_pos += 1
relation_index += 1
if self.instance and self.instance.is_spatial:
self.set_setting_to_model()
class Meta:
model = Publish
fields = ('name','workspace','interval','status','input_table','dependents','priority','sql','create_extra_index_sql')
| 44.074303
| 197
| 0.650042
|
ec421df0152904b67af7322daf36adc32ed4e8d5
| 800
|
py
|
Python
|
schemas/ModuleBuild.py
|
fresch/maven-build-tracker
|
ad850f2e290423b77079db64391e403c565cdef1
|
[
"MIT"
] | null | null | null |
schemas/ModuleBuild.py
|
fresch/maven-build-tracker
|
ad850f2e290423b77079db64391e403c565cdef1
|
[
"MIT"
] | null | null | null |
schemas/ModuleBuild.py
|
fresch/maven-build-tracker
|
ad850f2e290423b77079db64391e403c565cdef1
|
[
"MIT"
] | null | null | null |
from typing import List
from datetime import datetime
from uuid import UUID
from pydantic import BaseModel
from .SubModuleBuild import SubModuleBuild, SubModuleBuildResponse
from .BuildStatus import BuildStatus
class ModuleBuild(BaseModel):
module: str
build_time: str
result: BuildStatus
finished_at: datetime
maven_opts: str
uname: str
uuid: UUID
cpu: str
mem: int
submodules: List[SubModuleBuild]
class Config:
orm_mode = True
class ModuleBuildResponse(BaseModel):
id: int
module: str
# build_time: str
# result: BuildStatus
# finished_at: str
# maven_opts: str
# uname: str
# uuid: UUID
# cpu: str
# mem: int
submodules: List[SubModuleBuildResponse]
class Config:
orm_mode = True
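# Example (a sketch; the ORM object below is a hypothetical placeholder): with
# orm_mode enabled, ModuleBuildResponse.from_orm(db_module_build) builds the
# response model directly from an ORM instance rather than a plain dict.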
| 19.047619
| 66
| 0.68625
|
e6211b378f1643fe6dc1a4b30228853e8d7a8763
| 10,864
|
py
|
Python
|
src/elasticsearch/addl_index_transformations/portal/translate.py
|
sennetconsortium/search-api
|
01a5c0ab8ec6abd147e5b04477ba10f80fedfdc3
|
[
"MIT"
] | null | null | null |
src/elasticsearch/addl_index_transformations/portal/translate.py
|
sennetconsortium/search-api
|
01a5c0ab8ec6abd147e5b04477ba10f80fedfdc3
|
[
"MIT"
] | 7
|
2022-02-08T19:39:14.000Z
|
2022-03-18T20:26:10.000Z
|
src/elasticsearch/addl_index_transformations/portal/translate.py
|
sennetconsortium/search-api
|
01a5c0ab8ec6abd147e5b04477ba10f80fedfdc3
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import re
from datetime import datetime
from collections import defaultdict
from yaml import safe_load as load_yaml
from libs.assay_type import AssayType
class TranslationException(Exception):
pass
def _unexpected(s):
return f'No translation for "{s}"'
def translate(doc):
_add_metadata_metadata_placeholder(doc)
_translate_file_description(doc)
_translate_status(doc)
_translate_organ(doc)
_translate_donor_metadata(doc)
_translate_specimen_type(doc)
_translate_data_type(doc)
_translate_timestamp(doc)
_translate_access_level(doc)
_translate_external_consortium(doc)
# Utils:
_enums_dir = Path(__file__).parent.parent.parent.parent / 'search-schema' / 'data' / 'definitions' / 'enums'
_enums = {path.stem: load_yaml(path.read_text()) for path in _enums_dir.iterdir()}
def _map(doc, key, map):
# The recursion is usually not needed...
# but better to do it everywhere than to miss one case.
if key in doc:
doc[f'mapped_{key}'] = map(doc[key])
if 'donor' in doc:
_map(doc['donor'], key, map)
if 'origin_sample' in doc:
_map(doc['origin_sample'], key, map)
if 'source_sample' in doc:
for sample in doc['source_sample']:
_map(sample, key, map)
if 'ancestors' in doc:
for ancestor in doc['ancestors']:
_map(ancestor, key, map)
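# For example (hypothetical doc): _map({'organ': 'RK', 'donor': {'organ': 'RK'}},
# 'organ', _organ_map) adds a 'mapped_organ' entry both at the top level and
# inside the nested 'donor' dict.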
def _add_metadata_metadata_placeholder(doc):
'''
For datasets, the "metadata" used by the portal is actually at
"metadata.metadata" and in dev-search, there is a boolean facet
that looks for this path. Samples and Donors don't follow this pattern,
but to enable the boolean facet, we add a placeholder.
>>> doc = {'entity_type': 'Donor', 'metadata': {}}
>>> _add_metadata_metadata_placeholder(doc)
>>> assert 'metadata' in doc['metadata']
>>> doc = {'entity_type': 'Donor'}
>>> _add_metadata_metadata_placeholder(doc)
>>> assert 'metadata' not in doc
>>> doc = {'entity_type': 'Dataset', 'metadata': {}}
>>> _add_metadata_metadata_placeholder(doc)
>>> assert 'metadata' not in doc['metadata']
'''
if doc['entity_type'] in ['Donor', 'Sample'] and 'metadata' in doc:
doc['metadata']['metadata'] = {'has_metadata': True}
# File description:
def _translate_file_description(doc):
'''
>>> doc = {'files': [{
... "description": "OME-TIFF pyramid file",
... "edam_term": "EDAM_1.24.format_3727",
... "is_qa_qc": False,
... "rel_path": "ometiff-pyramids/stitched/expressions/reg1_stitched_expressions.ome.tif",
... "size": 123456789,
... "type": "unknown"
... }]}
>>> _translate_file_description(doc)
>>> from pprint import pprint
>>> pprint(doc)
{'files': [{'description': 'OME-TIFF pyramid file',
'edam_term': 'EDAM_1.24.format_3727',
'is_qa_qc': False,
'mapped_description': 'OME-TIFF pyramid file (TIF file)',
'rel_path': 'ometiff-pyramids/stitched/expressions/reg1_stitched_expressions.ome.tif',
'size': 123456789,
'type': 'unknown'}]}
'''
for file in doc.get('files', []):
extension = file['rel_path'].split('.')[-1].upper()
file['mapped_description'] = file['description'] + f' ({extension} file)'
# Data access level:
def _translate_access_level(doc):
'''
>>> doc = {'data_access_level': 'consortium'}
>>> _translate_access_level(doc); doc
{'data_access_level': 'consortium', 'mapped_data_access_level': 'Consortium'}
>>> doc = {'data_access_level': 'top-secret'}
>>> _translate_access_level(doc); doc
{'data_access_level': 'top-secret', 'mapped_data_access_level': 'No translation for "top-secret"'}
'''
_map(doc, 'data_access_level', _access_level_map)
def _access_level_map(access_level):
if access_level not in _enums['data_access_levels'].keys():
return _unexpected(access_level)
return access_level.title()
# External consortium:
def _translate_external_consortium(doc):
'''
>>> doc = {}
>>> _translate_external_consortium(doc); doc
{'mapped_consortium': 'HuBMAP'}
>>> doc = {'group_name': 'Inside HuBMAP'}
>>> _translate_external_consortium(doc); doc
{'group_name': 'Inside HuBMAP', 'mapped_consortium': 'HuBMAP'}
>>> doc = {'group_name': 'EXT - Outside HuBMAP'}
>>> _translate_external_consortium(doc); doc
{'group_name': 'EXT - Outside HuBMAP', 'mapped_external_group_name': 'Outside HuBMAP', 'mapped_consortium': 'Outside HuBMAP'}
'''
group_name = doc.get('group_name')
if group_name is not None and 'EXT' in group_name:
mapped_consortium = group_name.replace('EXT - ', '')
doc['mapped_external_group_name'] = mapped_consortium
else:
mapped_consortium = 'HuBMAP'
doc['mapped_consortium'] = mapped_consortium
# Timestamp:
def _translate_timestamp(doc):
'''
>>> doc = {
... 'create_timestamp': '1575489509656',
... 'last_modified_timestamp': 1590017663118
... }
>>> _translate_timestamp(doc)
>>> from pprint import pprint
>>> pprint(doc)
{'create_timestamp': '1575489509656',
'last_modified_timestamp': 1590017663118,
'mapped_create_timestamp': '2019-12-04 19:58:29',
'mapped_last_modified_timestamp': '2020-05-20 23:34:23'}
'''
_map(doc, 'create_timestamp', _timestamp_map)
_map(doc, 'last_modified_timestamp', _timestamp_map)
def _timestamp_map(timestamp):
return (
datetime.utcfromtimestamp(int(timestamp) / 1000)
.strftime('%Y-%m-%d %H:%M:%S')
)
# Status:
def _translate_status(doc):
'''
>>> doc = {'status': 'New'}
>>> _translate_status(doc); doc
{'status': 'New', 'mapped_status': 'New'}
>>> doc = {'status': 'Foobar'}
>>> _translate_status(doc); doc
{'status': 'Foobar', 'mapped_status': 'No translation for "Foobar"'}
'''
_map(doc, 'status', _status_map)
def _status_map(status):
if status not in _enums['dataset_status_types'].keys():
return _unexpected(status)
return status
# Organ:
def _translate_organ(doc):
'''
>>> doc = {'origin_sample': {'organ': 'RK'}}
>>> _translate_organ(doc); doc
{'origin_sample': {'organ': 'RK', 'mapped_organ': 'Kidney (Right)'}}
>>> doc = {'origin_sample': {'organ': 'ZZ'}}
>>> _translate_organ(doc); doc
{'origin_sample': {'organ': 'ZZ', 'mapped_organ': 'No translation for "ZZ"'}}
'''
_map(doc, 'organ', _organ_map)
def _organ_map(k):
if k not in _organ_dict:
return _unexpected(k)
return _organ_dict[k]
_organ_dict = {
k: v['description']
for k, v in _enums['organ_types'].items()
}
# Specimen type:
def _translate_specimen_type(doc):
'''
>>> doc = {'specimen_type': 'fresh_frozen_tissue'}
>>> _translate_specimen_type(doc); doc
{'specimen_type': 'fresh_frozen_tissue', 'mapped_specimen_type': 'Fresh frozen tissue'}
>>> doc = {'specimen_type': 'xyz'}
>>> _translate_specimen_type(doc); doc
{'specimen_type': 'xyz', 'mapped_specimen_type': 'No translation for "xyz"'}
'''
_map(doc, 'specimen_type', _specimen_types_map)
def _specimen_types_map(k):
if k not in _specimen_types_dict:
return _unexpected(k)
return _specimen_types_dict[k]
_specimen_types_dict = {
k: v['description']
for k, v in _enums['tissue_sample_types'].items()
}
# Assay type:
def _translate_data_type(doc):
'''
>>> doc = {'data_types': ['AF']}
>>> _translate_data_type(doc); doc
{'data_types': ['AF'], 'mapped_data_types': ['Autofluorescence Microscopy']}
>>> doc = {'data_types': ['image_pyramid', 'AF']}
>>> _translate_data_type(doc); doc
{'data_types': ['image_pyramid', 'AF'], 'mapped_data_types': ['Autofluorescence Microscopy [Image Pyramid]']}
>>> doc = {'data_types': ['salmon_rnaseq_10x_sn']}
>>> _translate_data_type(doc); doc
{'data_types': ['salmon_rnaseq_10x_sn'], 'mapped_data_types': ['snRNA-seq [Salmon]']}
>>> doc = {'data_types': ['xyz', 'image_pyramid']}
>>> _translate_data_type(doc); doc
{'data_types': ['xyz', 'image_pyramid'], 'mapped_data_types': ['No translation for "[\\'xyz\\', \\'image_pyramid\\']"']}
'''
_map(doc, 'data_types', _data_types_map)
def _data_types_map(ks):
assert len(ks) == 1 or (len(ks) == 2 and ('image_pyramid' in ks or 'Image Pyramid' in ks)), \
f"Maximum 2 types, and one should be image pyramid: {ks}"
single_key = ks[0] if len(ks) == 1 else ks
try:
r = AssayType(single_key).description
except RuntimeError:
r = _unexpected(single_key)
return [r]
# Donor metadata:
def _translate_donor_metadata(doc):
'''
>>> doc = {"metadata": None}
>>> _translate_donor_metadata(doc)
>>> doc
{'metadata': None, 'mapped_metadata': {}}
Multi-valued fields are supported:
>>> doc = {
... "metadata": {
... "organ_donor_data": [{
... "preferred_term": "Diabetes",
... "grouping_concept_preferred_term": "Medical history"
... },
... {
... "preferred_term": "Cancer",
... "grouping_concept_preferred_term": "Medical history"
... }]
... }
... }
>>> _translate_donor_metadata(doc)
>>> doc['mapped_metadata']
{'medical_history': ['Diabetes', 'Cancer']}
Numeric fields are turned into floats, and units are their own field:
>>> doc = {
... "metadata": {
... "organ_donor_data": [{
... "data_type": "Numeric",
... "data_value": "87.6",
... "grouping_concept_preferred_term": "Weight",
... "units": "kg"
... }]
... }
... }
>>> _translate_donor_metadata(doc)
>>> doc['mapped_metadata']
{'weight_value': [87.6], 'weight_unit': ['kg']}
'''
_map(doc, 'metadata', _donor_metadata_map)
def _donor_metadata_map(metadata):
if metadata is None:
return {}
donor_metadata = metadata.get('organ_donor_data') or metadata.get('living_donor_data') or {}
mapped_metadata = defaultdict(list)
for kv in donor_metadata:
term = kv['grouping_concept_preferred_term']
key = re.sub(r'\W+', '_', term).lower()
value = (
float(kv['data_value'])
if 'data_type' in kv and kv['data_type'] == 'Numeric'
else kv['preferred_term']
)
if 'units' not in kv or not len(kv['units']):
mapped_metadata[key].append(value)
continue
mapped_metadata[f'{key}_value'].append(value)
mapped_metadata[f'{key}_unit'].append(kv['units'])
return dict(mapped_metadata)
| 29.928375
| 129
| 0.618925
|
521702b6f29e8c6adf41e9ec178450a1d95cc54d
| 2,633
|
py
|
Python
|
test/llc/low-level/Expected/AcceptanceTests/LLCUpdateOneLowLevel/llcpackagelowlevel/rest/params/_request_builders.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 35
|
2018-04-03T12:15:53.000Z
|
2022-03-11T14:03:34.000Z
|
test/llc/low-level/Expected/AcceptanceTests/LLCUpdateOneLowLevel/llcpackagelowlevel/rest/params/_request_builders.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 652
|
2017-08-28T22:44:41.000Z
|
2022-03-31T21:20:31.000Z
|
test/llc/low-level/Expected/AcceptanceTests/LLCUpdateOneLowLevel/llcpackagelowlevel/rest/params/_request_builders.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 29
|
2017-08-28T20:57:01.000Z
|
2022-03-11T14:03:38.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.rest import HttpRequest
from msrest import Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Dict, Optional
_SERIALIZER = Serializer()
# fmt: off
def build_get_required_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Get true Boolean value on path.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword parameter3: I am a required parameter and I'm last in Swagger.
:paramtype parameter3: str
:keyword parameter1: I am a required parameter with a client default value.
:paramtype parameter1: str
:keyword parameter2: I was a required parameter, but now I'm optional.
:paramtype parameter2: str
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
parameter3 = kwargs.pop('parameter3') # type: str
parameter1 = kwargs.pop('parameter1', "DefaultValue") # type: str
parameter2 = kwargs.pop('parameter2', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/llc/parameters')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['parameter1'] = _SERIALIZER.query("parameter1", parameter1, 'str')
if parameter2 is not None:
query_parameters['parameter2'] = _SERIALIZER.query("parameter2", parameter2, 'str')
query_parameters['parameter3'] = _SERIALIZER.query("parameter3", parameter3, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
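# Example usage (a minimal sketch; the parameter values and the `client` object
# are hypothetical placeholders):
#
#   request = build_get_required_request(
#       parameter3="required value",
#       parameter2="now optional",
#   )
#   # response = client.send_request(request)  # see the quickstart link above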
| 38.720588
| 99
| 0.665021
|
bf7c4d1af38329e61f041c9501307c378ec4125b
| 36,354
|
py
|
Python
|
scripts/grace_spatial_maps.py
|
geodeepak/GRACE_HYDL
|
5aeeb289f2f8e2d2e29b3b01bded22daf3fb4413
|
[
"MIT"
] | null | null | null |
scripts/grace_spatial_maps.py
|
geodeepak/GRACE_HYDL
|
5aeeb289f2f8e2d2e29b3b01bded22daf3fb4413
|
[
"MIT"
] | null | null | null |
scripts/grace_spatial_maps.py
|
geodeepak/GRACE_HYDL
|
5aeeb289f2f8e2d2e29b3b01bded22daf3fb4413
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
u"""
grace_spatial_maps.py
Written by Tyler Sutterley (10/2021)
Reads in GRACE/GRACE-FO spherical harmonic coefficients and exports
monthly spatial fields
Will correct with the specified GIA model group, destripe/smooth/process,
and export the data in specified units
Spatial output units: cm w.e., mm geoid height, mm elastic uplift,
microgal gravity perturbation or surface pressure (mbar)
COMMAND LINE OPTIONS:
--help: list the command line options
-D X, --directory X: Working data directory
-O X, --output-directory X: output directory for spatial files
-P X, --file-prefix X: prefix string for input and output files
-c X, --center X: GRACE/GRACE-FO processing center
-r X, --release X: GRACE/GRACE-FO data release
-p X, --product X: GRACE/GRACE-FO Level-2 data product
-S X, --start X: starting GRACE/GRACE-FO month
-E X, --end X: ending GRACE/GRACE-FO month
-N X, --missing X: Missing GRACE/GRACE-FO months
--lmin X: minimum spherical harmonic degree
-l X, --lmax X: maximum spherical harmonic degree
-m X, --mmax X: maximum spherical harmonic order
-R X, --radius X: Gaussian smoothing radius (km)
-d, --destripe: use decorrelation filter (destriping filter)
-n X, --love X: Load Love numbers dataset
0: Han and Wahr (1995) values from PREM
1: Gegout (2005) values from PREM
2: Wang et al. (2012) values from PREM
--reference X: Reference frame for load love numbers
CF: Center of Surface Figure (default)
CM: Center of Mass of Earth System
CE: Center of Mass of Solid Earth
-F X, --format X: input/output data format
ascii
netCDF4
HDF5
-G X, --gia X: GIA model type to read
IJ05-R2: Ivins R2 GIA Models
W12a: Whitehouse GIA Models
SM09: Simpson/Milne GIA Models
ICE6G: ICE-6G GIA Models
Wu10: Wu (2010) GIA Correction
AW13-ICE6G: Geruo A ICE-6G GIA Models
Caron: Caron JPL GIA Assimilation
ICE6G-D: ICE-6G Version-D GIA Models
ascii: reformatted GIA in ascii format
netCDF4: reformatted GIA in netCDF4 format
HDF5: reformatted GIA in HDF5 format
--gia-file X: GIA file to read
--atm-correction: Apply atmospheric jump correction coefficients
--pole-tide: Correct for pole tide drift
--geocenter X: Update Degree 1 coefficients with SLR or derived values
Tellus: GRACE/GRACE-FO TN-13 coefficients from PO.DAAC
SLR: satellite laser ranging coefficients from CSR
SLF: Sutterley and Velicogna coefficients, Remote Sensing (2019)
Swenson: GRACE-derived coefficients from Sean Swenson
GFZ: GRACE/GRACE-FO coefficients from GFZ GravIS
--interpolate-geocenter: Least-squares model missing Degree 1 coefficients
--slr-c20 X: Replace C20 coefficients with SLR values
CSR: use values from CSR (TN-07,TN-09,TN-11)
GFZ: use values from GFZ
GSFC: use values from GSFC (TN-14)
--slr-21 X: Replace C21 and S21 coefficients with SLR values
CSR: use values from CSR
GFZ: use values from GFZ GravIS
GSFC: use values from GSFC
--slr-22 X: Replace C22 and S22 coefficients with SLR values
CSR: use values from CSR
--slr-c30 X: Replace C30 coefficients with SLR values
CSR: use values from CSR (5x5 with 6,1)
GFZ: use values from GFZ GravIS
GSFC: use values from GSFC (TN-14)
--slr-c50 X: Replace C50 coefficients with SLR values
CSR: use values from CSR (5x5 with 6,1)
GSFC: use values from GSFC
-U X, --units X: output units
1: cm of water thickness
2: mm of geoid height
3: mm of elastic crustal deformation [Davis 2004]
4: microGal gravitational perturbation
5: mbar equivalent surface pressure
--spacing X: spatial resolution of output data (dlon,dlat)
--interval X: output grid interval
1: (-180:180, 90:-90)
2: (degree spacing/2)
3: non-global grid (set with defined bounds)
--bounds X: non-global grid bounding box (minlon,maxlon,minlat,maxlat)
--mean-file X: GRACE/GRACE-FO mean file to remove from the harmonic data
--mean-format X: Input data format for GRACE/GRACE-FO mean file
ascii
netCDF4
HDF5
gfc
--mask X: Land-sea mask for redistributing land water flux
--remove-file X: Monthly files to be removed from the GRACE/GRACE-FO data
--remove-format X: Input data format for files to be removed
ascii
netCDF4
HDF5
index-ascii
index-netCDF4
index-HDF5
--redistribute-removed: redistribute removed mass fields over the ocean
--log: Output log of files created for each job
-V, --verbose: verbose output of processing run
-M X, --mode X: Permissions mode of the files created
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
dateutil: powerful extensions to datetime
https://dateutil.readthedocs.io/en/stable/
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
h5py: Pythonic interface to the HDF5 binary data format.
https://www.h5py.org/
PROGRAM DEPENDENCIES:
grace_input_months.py: Reads GRACE/GRACE-FO files for a specified spherical
harmonic degree and order and for a specified date range
Includes degree 1 with Swenson values (if specified)
Replaces C20,C21,S21,C22,S22,C30 and C50 with SLR values (if specified)
read_GIA_model.py: reads harmonics for a glacial isostatic adjustment model
read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995)
plm_holmes.py: Computes fully normalized associated Legendre polynomials
gauss_weights.py: Computes the Gaussian weights as a function of degree
ocean_stokes.py: converts a land-sea mask to a series of spherical harmonics
gen_stokes.py: converts a spatial field into a series of spherical harmonics
geocenter.py: converts between spherical harmonics and geocenter variations
harmonic_summation.py: calculates a spatial field from spherical harmonics
units.py: class for converting GRACE/GRACE-FO Level-2 data to specific units
harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO
destripe_harmonics.py: calculates the decorrelation (destriping) filter
and filters the GRACE/GRACE-FO coefficients for striping errors
ncdf_read_stokes.py: reads spherical harmonic netcdf files
ncdf_stokes.py: writes output spherical harmonic data to netcdf
hdf5_read_stokes.py: reads spherical harmonic HDF5 files
hdf5_stokes.py: writes output spherical harmonic data to HDF5
spatial.py: spatial data class for reading, writing and processing data
ncdf_read.py: reads input spatial data from netCDF4 files
hdf5_read.py: reads input spatial data from HDF5 files
ncdf_write.py: writes output spatial data to netCDF4
hdf5_write.py: writes output spatial data to HDF5
utilities.py: download and management utilities for files
UPDATE HISTORY:
Updated 11/2021: add GSFC low-degree harmonics
Updated 10/2021: using python logging for handling verbose output
add more choices for setting input format of the removed files
Updated 07/2021: simplified file imports using wrappers in harmonics
added path to default land-sea mask for mass redistribution
remove choices for argparse processing centers
Updated 06/2021: switch from parameter files to argparse arguments
Updated 05/2021: define int/float precision to prevent deprecation warning
Updated 04/2021: include parameters for replacing C21/S21 and C22/S22
Updated 02/2021: changed remove index to files with specified formats
Updated 01/2021: harmonics object output from gen_stokes.py/ocean_stokes.py
Updated 12/2020: added more love number options and from gfc for mean files
Updated 10/2020: use argparse to set command line parameters
Updated 08/2020: use utilities to define path to load love numbers file
Updated 06/2020: using spatial data class for output operations
Updated 05/2020: for public release
"""
from __future__ import print_function
import sys
import os
import re
import time
import logging
import numpy as np
import argparse
import traceback
import gravity_toolkit.utilities as utilities
from gravity_toolkit.grace_input_months import grace_input_months
from gravity_toolkit.read_GIA_model import read_GIA_model
from gravity_toolkit.read_love_numbers import read_love_numbers
from gravity_toolkit.plm_holmes import plm_holmes
from gravity_toolkit.gauss_weights import gauss_weights
from gravity_toolkit.ocean_stokes import ocean_stokes
from gravity_toolkit.harmonic_summation import harmonic_summation
from gravity_toolkit.harmonics import harmonics
from gravity_toolkit.spatial import spatial
from gravity_toolkit.units import units
#-- PURPOSE: keep track of threads
def info(args):
logging.info(os.path.basename(sys.argv[0]))
logging.info(args)
logging.info('module name: {0}'.format(__name__))
if hasattr(os, 'getppid'):
logging.info('parent process: {0:d}'.format(os.getppid()))
logging.info('process id: {0:d}'.format(os.getpid()))
#-- PURPOSE: read load love numbers for the range of spherical harmonic degrees
def load_love_numbers(LMAX, LOVE_NUMBERS=0, REFERENCE='CF'):
"""
Reads PREM load Love numbers for the range of spherical harmonic degrees
and applies isomorphic parameters
Arguments
---------
LMAX: maximum spherical harmonic degree
Keyword arguments
-----------------
LOVE_NUMBERS: Load Love numbers dataset
0: Han and Wahr (1995) values from PREM
1: Gegout (2005) values from PREM
2: Wang et al. (2012) values from PREM
REFERENCE: Reference frame for calculating degree 1 love numbers
CF: Center of Surface Figure (default)
CM: Center of Mass of Earth System
CE: Center of Mass of Solid Earth
Returns
-------
kl: Love number of Gravitational Potential
hl: Love number of Vertical Displacement
ll: Love number of Horizontal Displacement
"""
#-- load love numbers file
if (LOVE_NUMBERS == 0):
#-- PREM outputs from Han and Wahr (1995)
#-- https://doi.org/10.1111/j.1365-246X.1995.tb01819.x
love_numbers_file = utilities.get_data_path(
['data','love_numbers'])
header = 2
columns = ['l','hl','kl','ll']
elif (LOVE_NUMBERS == 1):
#-- PREM outputs from Gegout (2005)
#-- http://gemini.gsfc.nasa.gov/aplo/
love_numbers_file = utilities.get_data_path(
['data','Load_Love2_CE.dat'])
header = 3
columns = ['l','hl','ll','kl']
elif (LOVE_NUMBERS == 2):
#-- PREM outputs from Wang et al. (2012)
#-- https://doi.org/10.1016/j.cageo.2012.06.022
love_numbers_file = utilities.get_data_path(
['data','PREM-LLNs-truncated.dat'])
header = 1
columns = ['l','hl','ll','kl','nl','nk']
#-- LMAX of load love numbers from Han and Wahr (1995) is 696.
#-- from Wahr (2007) linearly interpolating kl works
#-- however, as we are linearly extrapolating out, do not make
#-- LMAX too much larger than 696
#-- read arrays of kl, hl, and ll Love Numbers
hl,kl,ll = read_love_numbers(love_numbers_file, LMAX=LMAX, HEADER=header,
COLUMNS=columns, REFERENCE=REFERENCE, FORMAT='tuple')
#-- return a tuple of load love numbers
return (hl,kl,ll)
#-- PURPOSE: import GRACE/GRACE-FO files for a given months range
#-- Converts the GRACE/GRACE-FO harmonics applying the specified procedures
def grace_spatial_maps(base_dir, PROC, DREL, DSET, LMAX, RAD,
START=None,
END=None,
MISSING=None,
LMIN=None,
MMAX=None,
LOVE_NUMBERS=0,
REFERENCE=None,
DESTRIPE=False,
UNITS=None,
DDEG=None,
INTERVAL=None,
BOUNDS=None,
GIA=None,
GIA_FILE=None,
ATM=False,
POLE_TIDE=False,
DEG1=None,
MODEL_DEG1=False,
SLR_C20=None,
SLR_21=None,
SLR_22=None,
SLR_C30=None,
SLR_C50=None,
DATAFORM=None,
MEAN_FILE=None,
MEANFORM=None,
REMOVE_FILES=None,
REMOVE_FORMAT=None,
REDISTRIBUTE_REMOVED=False,
LANDMASK=None,
OUTPUT_DIRECTORY=None,
FILE_PREFIX=None,
VERBOSE=False,
MODE=0o775):
#-- recursively create output directory if not currently existing
if not os.access(OUTPUT_DIRECTORY, os.F_OK):
os.makedirs(OUTPUT_DIRECTORY, mode=MODE, exist_ok=True)
#-- list object of output files for file logs (full path)
output_files = []
#-- file information
suffix = dict(ascii='txt', netCDF4='nc', HDF5='H5')
#-- read arrays of kl, hl, and ll Love Numbers
hl,kl,ll = load_love_numbers(LMAX, LOVE_NUMBERS=LOVE_NUMBERS,
REFERENCE=REFERENCE)
#-- Calculating the Gaussian smoothing for radius RAD
if (RAD != 0):
wt = 2.0*np.pi*gauss_weights(RAD,LMAX)
gw_str = '_r{0:0.0f}km'.format(RAD)
else:
#-- else: no Gaussian smoothing, unit weights
wt = np.ones((LMAX+1))
gw_str = ''
#-- flag for spherical harmonic order
MMAX = np.copy(LMAX) if not MMAX else MMAX
order_str = 'M{0:d}'.format(MMAX) if (MMAX != LMAX) else ''
#-- reading GRACE months for input date range
#-- replacing low-degree harmonics with SLR values if specified
#-- include degree 1 (geocenter) harmonics if specified
#-- correcting for Pole-Tide and Atmospheric Jumps if specified
Ylms = grace_input_months(base_dir, PROC, DREL, DSET, LMAX,
START, END, MISSING, SLR_C20, DEG1, MMAX=MMAX,
SLR_21=SLR_21, SLR_22=SLR_22, SLR_C30=SLR_C30, SLR_C50=SLR_C50,
MODEL_DEG1=MODEL_DEG1, ATM=ATM, POLE_TIDE=POLE_TIDE)
#-- convert to harmonics object and remove mean if specified
GRACE_Ylms = harmonics().from_dict(Ylms)
GRACE_Ylms.directory = Ylms['directory']
GRACE_Ylms.title = Ylms['title']
#-- default file prefix
if not FILE_PREFIX:
FILE_PREFIX = GRACE_Ylms.title
#-- use a mean file for the static field to remove
if MEAN_FILE:
#-- read data form for input mean file (ascii, netCDF4, HDF5, gfc)
mean_Ylms = harmonics().from_file(MEAN_FILE,format=MEANFORM,date=False)
#-- remove the input mean
GRACE_Ylms.subtract(mean_Ylms)
else:
GRACE_Ylms.mean(apply=True)
#-- date information of GRACE/GRACE-FO coefficients
nfiles = len(GRACE_Ylms.time)
#-- filter GRACE/GRACE-FO coefficients
if DESTRIPE:
#-- destriping GRACE/GRACE-FO coefficients
ds_str = '_FL'
GRACE_Ylms = GRACE_Ylms.destripe()
else:
#-- using standard GRACE/GRACE-FO harmonics
ds_str = ''
#-- input GIA spherical harmonic datafiles
GIA_Ylms_rate = read_GIA_model(GIA_FILE,GIA=GIA,LMAX=LMAX,MMAX=MMAX)
#-- calculate the monthly mass change from GIA
GIA_Ylms = GRACE_Ylms.zeros_like()
GIA_Ylms.time[:] = np.copy(GRACE_Ylms.time)
GIA_Ylms.month[:] = np.copy(GRACE_Ylms.month)
#-- monthly GIA calculated by gia_rate*time elapsed
#-- finding change in GIA each month
for t in range(nfiles):
GIA_Ylms.clm[:,:,t] = GIA_Ylms_rate['clm']*(GIA_Ylms.time[t]-2003.3)
GIA_Ylms.slm[:,:,t] = GIA_Ylms_rate['slm']*(GIA_Ylms.time[t]-2003.3)
#-- Read Ocean function and convert to Ylms for redistribution
if REDISTRIBUTE_REMOVED:
#-- read Land-Sea Mask and convert to spherical harmonics
ocean_Ylms = ocean_stokes(LANDMASK,LMAX,MMAX=MMAX,LOVE=(hl,kl,ll))
ocean_str = '_OCN'
else:
ocean_str = ''
#-- input spherical harmonic datafiles to be removed from the GRACE data
#-- Remove sets of Ylms from the GRACE data before returning
remove_Ylms = GRACE_Ylms.zeros_like()
remove_Ylms.time[:] = np.copy(GRACE_Ylms.time)
remove_Ylms.month[:] = np.copy(GRACE_Ylms.month)
if REMOVE_FILES:
#-- extend list if a single format was entered for all files
if len(REMOVE_FORMAT) < len(REMOVE_FILES):
REMOVE_FORMAT = REMOVE_FORMAT*len(REMOVE_FILES)
#-- for each file to be removed
for REMOVE_FILE,REMOVEFORM in zip(REMOVE_FILES,REMOVE_FORMAT):
if REMOVEFORM in ('ascii','netCDF4','HDF5'):
#-- ascii (.txt)
#-- netCDF4 (.nc)
#-- HDF5 (.H5)
Ylms = harmonics().from_file(REMOVE_FILE, format=REMOVEFORM)
elif REMOVEFORM in ('index-ascii','index-netCDF4','index-HDF5'):
#-- read from index file
_,removeform = REMOVEFORM.split('-')
#-- index containing files in data format
Ylms = harmonics().from_index(REMOVE_FILE, format=removeform)
#-- reduce to GRACE/GRACE-FO months and truncate to degree and order
Ylms = Ylms.subset(GRACE_Ylms.month).truncate(lmax=LMAX,mmax=MMAX)
#-- distribute removed Ylms uniformly over the ocean
if REDISTRIBUTE_REMOVED:
#-- calculate ratio between total removed mass and
#-- a uniformly distributed cm of water over the ocean
ratio = Ylms.clm[0,0,:]/ocean_Ylms.clm[0,0]
#-- for each spherical harmonic
for m in range(0,MMAX+1):#-- MMAX+1 to include MMAX
for l in range(m,LMAX+1):#-- LMAX+1 to include LMAX
#-- remove the ratio*ocean Ylms from Ylms
#-- note: x -= y is equivalent to x = x - y
Ylms.clm[l,m,:] -= ratio*ocean_Ylms.clm[l,m]
Ylms.slm[l,m,:] -= ratio*ocean_Ylms.slm[l,m]
#-- filter removed coefficients
if DESTRIPE:
Ylms = Ylms.destripe()
#-- add data for month t and INDEX_FILE to the total
#-- remove_clm and remove_slm matrices
#-- redistributing the mass over the ocean if specified
remove_Ylms.add(Ylms)
#-- Output spatial data object
grid = spatial()
#-- Output Degree Spacing
dlon,dlat = (DDEG[0],DDEG[0]) if (len(DDEG) == 1) else (DDEG[0],DDEG[1])
#-- Output Degree Interval
if (INTERVAL == 1):
#-- (-180:180,90:-90)
nlon = np.int64((360.0/dlon)+1.0)
nlat = np.int64((180.0/dlat)+1.0)
grid.lon = -180 + dlon*np.arange(0,nlon)
grid.lat = 90.0 - dlat*np.arange(0,nlat)
elif (INTERVAL == 2):
#-- (Degree spacing)/2
grid.lon = np.arange(-180+dlon/2.0,180+dlon/2.0,dlon)
grid.lat = np.arange(90.0-dlat/2.0,-90.0-dlat/2.0,-dlat)
nlon = len(grid.lon)
nlat = len(grid.lat)
elif (INTERVAL == 3):
#-- non-global grid set with BOUNDS parameter
minlon,maxlon,minlat,maxlat = BOUNDS.copy()
grid.lon = np.arange(minlon+dlon/2.0,maxlon+dlon/2.0,dlon)
grid.lat = np.arange(maxlat-dlat/2.0,minlat-dlat/2.0,-dlat)
nlon = len(grid.lon)
nlat = len(grid.lat)
#-- Computing plms for converting to spatial domain
theta = (90.0-grid.lat)*np.pi/180.0
PLM,dPLM = plm_holmes(LMAX,np.cos(theta))
#-- Earth Parameters
#-- output spatial units
unit_list = ['cmwe', 'mmGH', 'mmCU', u'\u03BCGal', 'mbar']
unit_name = ['Equivalent Water Thickness', 'Geoid Height',
'Elastic Crustal Uplift', 'Gravitational Undulation',
'Equivalent Surface Pressure']
#-- Setting units factor for output
#-- dfactor computes the degree dependent coefficients
if (UNITS == 1):
#-- 1: cmwe, centimeters water equivalent
dfactor = units(lmax=LMAX).harmonic(hl,kl,ll).cmwe
elif (UNITS == 2):
#-- 2: mmGH, mm geoid height
dfactor = units(lmax=LMAX).harmonic(hl,kl,ll).mmGH
elif (UNITS == 3):
#-- 3: mmCU, mm elastic crustal deformation
dfactor = units(lmax=LMAX).harmonic(hl,kl,ll).mmCU
elif (UNITS == 4):
#-- 4: micGal, microGal gravity perturbations
dfactor = units(lmax=LMAX).harmonic(hl,kl,ll).microGal
elif (UNITS == 5):
#-- 5: mbar, millibars equivalent surface pressure
dfactor = units(lmax=LMAX).harmonic(hl,kl,ll).mbar
else:
raise ValueError('Invalid units code {0:d}'.format(UNITS))
#-- output file format
file_format = '{0}{1}_L{2:d}{3}{4}{5}_{6:03d}.{7}'
#-- converting harmonics to truncated, smoothed coefficients in units
#-- combining harmonics to calculate output spatial fields
for i,grace_month in enumerate(GRACE_Ylms.month):
#-- GRACE/GRACE-FO harmonics for time t
Ylms = GRACE_Ylms.index(i)
#-- Remove GIA rate for time
Ylms.subtract(GIA_Ylms.index(i))
#-- Remove monthly files to be removed
Ylms.subtract(remove_Ylms.index(i))
#-- smooth harmonics and convert to output units
Ylms.convolve(dfactor*wt)
#-- convert spherical harmonics to output spatial grid
grid.data = harmonic_summation(Ylms.clm, Ylms.slm,
grid.lon, grid.lat, LMIN=LMIN, LMAX=LMAX,
MMAX=MMAX, PLM=PLM).T
#-- copy time variables for month
grid.time = np.copy(Ylms.time)
grid.month = np.copy(Ylms.month)
#-- output monthly files to ascii, netCDF4 or HDF5
args=(FILE_PREFIX,unit_list[UNITS-1],LMAX,order_str,gw_str,
ds_str,grace_month,suffix[DATAFORM])
FILE=os.path.join(OUTPUT_DIRECTORY,file_format.format(*args))
if (DATAFORM == 'ascii'):
#-- ascii (.txt)
grid.to_ascii(FILE, date=True, verbose=VERBOSE)
elif (DATAFORM == 'netCDF4'):
#-- netCDF4
grid.to_netCDF4(FILE, date=True, verbose=VERBOSE,
units=unit_list[UNITS-1], longname=unit_name[UNITS-1],
title='GRACE/GRACE-FO Spatial Data')
elif (DATAFORM == 'HDF5'):
#-- HDF5
grid.to_HDF5(FILE, date=True, verbose=VERBOSE,
units=unit_list[UNITS-1], longname=unit_name[UNITS-1],
title='GRACE/GRACE-FO Spatial Data')
#-- set the permissions mode of the output files
os.chmod(FILE, MODE)
#-- add file to list
output_files.append(FILE)
#-- return the list of output files
return output_files
#-- PURPOSE: print a file log for the GRACE analysis
def output_log_file(arguments,output_files):
#-- format: GRACE_processing_run_2002-04-01_PID-70335.log
args = (time.strftime('%Y-%m-%d',time.localtime()), os.getpid())
LOGFILE = 'GRACE_processing_run_{0}_PID-{1:d}.log'.format(*args)
#-- create a unique log and open the log file
DIRECTORY = os.path.expanduser(arguments.output_directory)
fid = utilities.create_unique_file(os.path.join(DIRECTORY,LOGFILE))
logging.basicConfig(stream=fid, level=logging.INFO)
#-- print argument values sorted alphabetically
logging.info('ARGUMENTS:')
for arg, value in sorted(vars(arguments).items()):
logging.info('{0}: {1}'.format(arg, value))
#-- print output files
logging.info('\n\nOUTPUT FILES:')
for f in output_files:
logging.info('{0}'.format(f))
#-- close the log file
fid.close()
#-- PURPOSE: print a error file log for the GRACE analysis
def output_error_log_file(arguments):
#-- format: GRACE_processing_failed_run_2002-04-01_PID-70335.log
args = (time.strftime('%Y-%m-%d',time.localtime()), os.getpid())
LOGFILE = 'GRACE_processing_failed_run_{0}_PID-{1:d}.log'.format(*args)
#-- create a unique log and open the log file
DIRECTORY = os.path.expanduser(arguments.output_directory)
fid = utilities.create_unique_file(os.path.join(DIRECTORY,LOGFILE))
logging.basicConfig(stream=fid, level=logging.INFO)
#-- print argument values sorted alphabetically
logging.info('ARGUMENTS:')
for arg, value in sorted(vars(arguments).items()):
logging.info('{0}: {1}'.format(arg, value))
#-- print traceback error
logging.info('\n\nTRACEBACK ERROR:')
traceback.print_exc(file=fid)
#-- close the log file
fid.close()
#-- This is the main part of the program that calls the individual modules
def main():
#-- Read the system arguments listed after the program
parser = argparse.ArgumentParser(
description="""Calculates monthly spatial maps from GRACE/GRACE-FO
spherical harmonic coefficients
""",
fromfile_prefix_chars="@"
)
parser.convert_arg_line_to_args = utilities.convert_arg_line_to_args
#-- command line parameters
#-- working data directory
parser.add_argument('--directory','-D',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
default=os.getcwd(),
help='Working data directory')
parser.add_argument('--output-directory','-O',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
default=os.getcwd(),
help='Output directory for spatial files')
parser.add_argument('--file-prefix','-P',
type=str,
help='Prefix string for input and output files')
#-- Data processing center or satellite mission
parser.add_argument('--center','-c',
metavar='PROC', type=str, required=True,
help='GRACE/GRACE-FO Processing Center')
#-- GRACE/GRACE-FO data release
parser.add_argument('--release','-r',
metavar='DREL', type=str, default='RL06',
help='GRACE/GRACE-FO Data Release')
#-- GRACE/GRACE-FO Level-2 data product
parser.add_argument('--product','-p',
metavar='DSET', type=str, default='GSM',
help='GRACE/GRACE-FO Level-2 data product')
#-- minimum spherical harmonic degree
parser.add_argument('--lmin',
type=int, default=1,
help='Minimum spherical harmonic degree')
#-- maximum spherical harmonic degree and order
parser.add_argument('--lmax','-l',
type=int, default=60,
help='Maximum spherical harmonic degree')
parser.add_argument('--mmax','-m',
type=int, default=None,
help='Maximum spherical harmonic order')
#-- start and end GRACE/GRACE-FO months
parser.add_argument('--start','-S',
type=int, default=4,
help='Starting GRACE/GRACE-FO month')
parser.add_argument('--end','-E',
type=int, default=232,
help='Ending GRACE/GRACE-FO month')
MISSING = [6,7,18,109,114,125,130,135,140,141,146,151,156,162,166,167,
172,177,178,182,187,188,189,190,191,192,193,194,195,196,197,200,201]
parser.add_argument('--missing','-N',
metavar='MISSING', type=int, nargs='+', default=MISSING,
help='Missing GRACE/GRACE-FO months')
#-- different treatments of the load Love numbers
#-- 0: Han and Wahr (1995) values from PREM
#-- 1: Gegout (2005) values from PREM
#-- 2: Wang et al. (2012) values from PREM
parser.add_argument('--love','-n',
type=int, default=0, choices=[0,1,2],
help='Treatment of the Load Love numbers')
#-- option for setting reference frame for gravitational load love number
#-- reference frame options (CF, CM, CE)
parser.add_argument('--reference',
type=str.upper, default='CF', choices=['CF','CM','CE'],
help='Reference frame for load Love numbers')
#-- Gaussian smoothing radius (km)
parser.add_argument('--radius','-R',
type=float, default=0,
help='Gaussian smoothing radius (km)')
#-- Use a decorrelation (destriping) filter
parser.add_argument('--destripe','-d',
default=False, action='store_true',
help='Use decorrelation (destriping) filter')
#-- output units
parser.add_argument('--units','-U',
type=int, default=1, choices=[1,2,3,4,5],
help='Output units')
#-- output grid parameters
parser.add_argument('--spacing',
type=float, nargs='+', default=[0.5,0.5], metavar=('dlon','dlat'),
help='Spatial resolution of output data')
parser.add_argument('--interval',
type=int, default=2, choices=[1,2,3],
help=('Output grid interval '
'(1: global, 2: centered global, 3: non-global)'))
parser.add_argument('--bounds',
type=float, nargs=4, metavar=('lon_min','lon_max','lat_min','lat_max'),
help='Bounding box for non-global grid')
#-- GIA model type list
models = {}
models['IJ05-R2'] = 'Ivins R2 GIA Models'
models['W12a'] = 'Whitehouse GIA Models'
models['SM09'] = 'Simpson/Milne GIA Models'
models['ICE6G'] = 'ICE-6G GIA Models'
models['Wu10'] = 'Wu (2010) GIA Correction'
models['AW13-ICE6G'] = 'Geruo A ICE-6G GIA Models'
models['Caron'] = 'Caron JPL GIA Assimilation'
models['ICE6G-D'] = 'ICE-6G Version-D GIA Models'
models['ascii'] = 'reformatted GIA in ascii format'
models['netCDF4'] = 'reformatted GIA in netCDF4 format'
models['HDF5'] = 'reformatted GIA in HDF5 format'
#-- GIA model type
parser.add_argument('--gia','-G',
type=str, metavar='GIA', choices=models.keys(),
help='GIA model type to read')
#-- full path to GIA file
parser.add_argument('--gia-file',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
help='GIA file to read')
#-- use atmospheric jump corrections from Fagiolini et al. (2015)
parser.add_argument('--atm-correction',
default=False, action='store_true',
help='Apply atmospheric jump correction coefficients')
#-- correct for pole tide drift follow Wahr et al. (2015)
parser.add_argument('--pole-tide',
default=False, action='store_true',
help='Correct for pole tide drift')
#-- Update Degree 1 coefficients with SLR or derived values
#-- Tellus: GRACE/GRACE-FO TN-13 from PO.DAAC
#-- https://grace.jpl.nasa.gov/data/get-data/geocenter/
#-- SLR: satellite laser ranging from CSR
#-- ftp://ftp.csr.utexas.edu/pub/slr/geocenter/
#-- SLF: Sutterley and Velicogna, Remote Sensing (2019)
#-- https://www.mdpi.com/2072-4292/11/18/2108
#-- Swenson: GRACE-derived coefficients from Sean Swenson
#-- https://doi.org/10.1029/2007JB005338
#-- GFZ: GRACE/GRACE-FO coefficients from GFZ GravIS
#-- http://gravis.gfz-potsdam.de/corrections
parser.add_argument('--geocenter',
metavar='DEG1', type=str,
choices=['Tellus','SLR','SLF','Swenson','GFZ'],
help='Update Degree 1 coefficients with SLR or derived values')
parser.add_argument('--interpolate-geocenter',
default=False, action='store_true',
help='Least-squares model missing Degree 1 coefficients')
#-- replace low degree harmonics with values from Satellite Laser Ranging
parser.add_argument('--slr-c20',
type=str, default=None, choices=['CSR','GFZ','GSFC'],
help='Replace C20 coefficients with SLR values')
parser.add_argument('--slr-21',
type=str, default=None, choices=['CSR','GFZ','GSFC'],
help='Replace C21 and S21 coefficients with SLR values')
parser.add_argument('--slr-22',
type=str, default=None, choices=['CSR','GSFC'],
help='Replace C22 and S22 coefficients with SLR values')
parser.add_argument('--slr-c30',
type=str, default=None, choices=['CSR','GFZ','GSFC','LARES'],
help='Replace C30 coefficients with SLR values')
parser.add_argument('--slr-c50',
type=str, default=None, choices=['CSR','GSFC','LARES'],
help='Replace C50 coefficients with SLR values')
#-- input data format (ascii, netCDF4, HDF5)
parser.add_argument('--format','-F',
type=str, default='netCDF4', choices=['ascii','netCDF4','HDF5'],
help='Input/output data format')
#-- mean file to remove
parser.add_argument('--mean-file',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
help='GRACE/GRACE-FO mean file to remove from the harmonic data')
#-- input data format (ascii, netCDF4, HDF5)
parser.add_argument('--mean-format',
type=str, default='netCDF4', choices=['ascii','netCDF4','HDF5','gfc'],
help='Input data format for GRACE/GRACE-FO mean file')
#-- monthly files to be removed from the GRACE/GRACE-FO data
parser.add_argument('--remove-file',
type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs='+',
help='Monthly files to be removed from the GRACE/GRACE-FO data')
choices = []
choices.extend(['ascii','netCDF4','HDF5'])
choices.extend(['index-ascii','index-netCDF4','index-HDF5'])
parser.add_argument('--remove-format',
type=str, nargs='+', choices=choices,
help='Input data format for files to be removed')
parser.add_argument('--redistribute-removed',
default=False, action='store_true',
help='Redistribute removed mass fields over the ocean')
#-- land-sea mask for redistributing fluxes
lsmask = utilities.get_data_path(['data','landsea_hd.nc'])
parser.add_argument('--mask',
type=lambda p: os.path.abspath(os.path.expanduser(p)), default=lsmask,
help='Land-sea mask for redistributing land water flux')
#-- Output log file for each job in one of the following forms:
#-- GRACE_processing_run_2002-04-01_PID-00000.log
#-- GRACE_processing_failed_run_2002-04-01_PID-00000.log
parser.add_argument('--log',
default=False, action='store_true',
help='Output log file for each job')
#-- print information about each input and output file
parser.add_argument('--verbose','-V',
default=False, action='store_true',
help='Verbose output of run')
#-- permissions mode of the local directories and files (number in octal)
parser.add_argument('--mode','-M',
type=lambda x: int(x,base=8), default=0o775,
help='permissions mode of output files')
args,_ = parser.parse_known_args()
#-- create logger
loglevel = logging.INFO if args.verbose else logging.CRITICAL
logging.basicConfig(level=loglevel)
#-- try to run the analysis with listed parameters
try:
info(args)
#-- run grace_spatial_maps algorithm with parameters
output_files = grace_spatial_maps(
args.directory,
args.center,
args.release,
args.product,
args.lmax,
args.radius,
START=args.start,
END=args.end,
MISSING=args.missing,
LMIN=args.lmin,
MMAX=args.mmax,
LOVE_NUMBERS=args.love,
REFERENCE=args.reference,
DESTRIPE=args.destripe,
UNITS=args.units,
DDEG=args.spacing,
INTERVAL=args.interval,
BOUNDS=args.bounds,
GIA=args.gia,
GIA_FILE=args.gia_file,
ATM=args.atm_correction,
POLE_TIDE=args.pole_tide,
DEG1=args.geocenter,
MODEL_DEG1=args.interpolate_geocenter,
SLR_C20=args.slr_c20,
SLR_21=args.slr_21,
SLR_22=args.slr_22,
SLR_C30=args.slr_c30,
SLR_C50=args.slr_c50,
DATAFORM=args.format,
MEAN_FILE=args.mean_file,
MEANFORM=args.mean_format,
REMOVE_FILES=args.remove_file,
REMOVE_FORMAT=args.remove_format,
REDISTRIBUTE_REMOVED=args.redistribute_removed,
LANDMASK=args.mask,
OUTPUT_DIRECTORY=args.output_directory,
FILE_PREFIX=args.file_prefix,
VERBOSE=args.verbose,
MODE=args.mode)
except Exception as e:
#-- if there has been an error exception
#-- print the type, value, and stack trace of the
#-- current exception being handled
logging.critical('process id {0:d} failed'.format(os.getpid()))
logging.error(traceback.format_exc())
if args.log:#-- write failed job completion log file
output_error_log_file(args)
else:
if args.log:#-- write successful job completion log file
output_log_file(args,output_files)
#-- run main program
if __name__ == '__main__':
main()
| 43.747292
| 80
| 0.653353
|
a224cc0b77f607859b8ce4ea8a9f31b9182d5bb0
| 492
|
py
|
Python
|
12_rnn.py
|
tuhoag/pytorch-zero2all
|
31bf132265e2765616f18ca4278e832301c27e89
|
[
"MIT"
] | null | null | null |
12_rnn.py
|
tuhoag/pytorch-zero2all
|
31bf132265e2765616f18ca4278e832301c27e89
|
[
"MIT"
] | null | null | null |
12_rnn.py
|
tuhoag/pytorch-zero2all
|
31bf132265e2765616f18ca4278e832301c27e89
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
idx2char = ['h', 'i', 'e', 'l', 'o']
x_data = [[0, 1, 0, 2, 3, 3]]  # index sequence for 'hihell'
one_hot_lookup = [[]]
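# Illustrative sketch (not part of the original script): one_hot_lookup above is
# left empty; one common way to fill it is an identity matrix over idx2char, so
# that each character index maps to its one-hot row, e.g.
#   one_hot_lookup = [[1 if i == j else 0 for j in range(len(idx2char))]
#                     for i in range(len(idx2char))]
#   x_one_hot = [[one_hot_lookup[idx] for idx in seq] for seq in x_data]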
# One-layer LSTM: 4-dimensional one-hot input, 2-dimensional hidden state,
# batch dimension first
cell = nn.LSTM(input_size=4, hidden_size=2, batch_first=True)
# One-hot encodings for the characters used in the toy batch below
h = [1, 0, 0, 0]
e = [0, 1, 0, 0]
l = [0, 0, 1, 0]
o = [0, 0, 0, 1]
# Batch of 3 sequences, 5 time steps each, 4 features per step
inputs = torch.tensor([[h, e, l, l, o], [e, o, l, l, l], [l, l, e, e, l]]).float()
print('input size: ', inputs.size())
# Initial (hidden state, cell state), each of shape (num_layers, batch, hidden_size)
hidden = torch.randn(1, 3, 2), torch.randn(1, 3, 2)
out, hidden = cell(inputs, hidden)
print(out.size())
| 25.894737
| 82
| 0.550813
|
e3e8f56284664afae2d5fbc16ed7b455c7a60653
| 9,957
|
py
|
Python
|
pybind/slxos/v16r_1_00b/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/af_vrf_neighbor_capability/as4/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/af_vrf_neighbor_capability/as4/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/af_vrf_neighbor_capability/as4/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class as4(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/router/router-bgp/address-family/ipv4/ipv4-unicast/af-vrf/neighbor/af-ipv4-vrf-neighbor-address-holder/af-ipv4-neighbor-addr/af-vrf-neighbor-capability/as4. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__neighbor_as4_enable','__neighbor_as4_disable',)
_yang_name = 'as4'
_rest_name = 'as4'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__neighbor_as4_enable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="neighbor-as4-enable", rest_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable AS4 capability', u'cli-full-command': None, u'alt-name': u'enable'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
self.__neighbor_as4_disable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="neighbor-as4-disable", rest_name="disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Disable AS4 capability', u'cli-full-command': None, u'alt-name': u'disable'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'router', u'router-bgp', u'address-family', u'ipv4', u'ipv4-unicast', u'af-vrf', u'neighbor', u'af-ipv4-vrf-neighbor-address-holder', u'af-ipv4-neighbor-addr', u'af-vrf-neighbor-capability', u'as4']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'router', u'bgp', u'address-family', u'ipv4', u'unicast', u'vrf', u'neighbor', u'af-ipv4-neighbor-addr', u'capability', u'as4']
def _get_neighbor_as4_enable(self):
"""
Getter method for neighbor_as4_enable, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/af_vrf_neighbor_capability/as4/neighbor_as4_enable (empty)
"""
return self.__neighbor_as4_enable
def _set_neighbor_as4_enable(self, v, load=False):
"""
Setter method for neighbor_as4_enable, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/af_vrf_neighbor_capability/as4/neighbor_as4_enable (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbor_as4_enable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbor_as4_enable() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="neighbor-as4-enable", rest_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable AS4 capability', u'cli-full-command': None, u'alt-name': u'enable'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """neighbor_as4_enable must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="neighbor-as4-enable", rest_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable AS4 capability', u'cli-full-command': None, u'alt-name': u'enable'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)""",
})
self.__neighbor_as4_enable = t
if hasattr(self, '_set'):
self._set()
def _unset_neighbor_as4_enable(self):
self.__neighbor_as4_enable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="neighbor-as4-enable", rest_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable AS4 capability', u'cli-full-command': None, u'alt-name': u'enable'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
def _get_neighbor_as4_disable(self):
"""
Getter method for neighbor_as4_disable, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/af_vrf_neighbor_capability/as4/neighbor_as4_disable (empty)
"""
return self.__neighbor_as4_disable
def _set_neighbor_as4_disable(self, v, load=False):
"""
Setter method for neighbor_as4_disable, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/af_vrf_neighbor_capability/as4/neighbor_as4_disable (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbor_as4_disable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbor_as4_disable() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="neighbor-as4-disable", rest_name="disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Disable AS4 capability', u'cli-full-command': None, u'alt-name': u'disable'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """neighbor_as4_disable must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="neighbor-as4-disable", rest_name="disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Disable AS4 capability', u'cli-full-command': None, u'alt-name': u'disable'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)""",
})
self.__neighbor_as4_disable = t
if hasattr(self, '_set'):
self._set()
def _unset_neighbor_as4_disable(self):
self.__neighbor_as4_disable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="neighbor-as4-disable", rest_name="disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Disable AS4 capability', u'cli-full-command': None, u'alt-name': u'disable'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
neighbor_as4_enable = __builtin__.property(_get_neighbor_as4_enable, _set_neighbor_as4_enable)
neighbor_as4_disable = __builtin__.property(_get_neighbor_as4_disable, _set_neighbor_as4_disable)
_pyangbind_elements = {'neighbor_as4_enable': neighbor_as4_enable, 'neighbor_as4_disable': neighbor_as4_disable, }
| 63.018987
| 457
| 0.740183
|
3164f33fb99f64838caa5c4907fdf39d3b6eea25
| 4,246
|
py
|
Python
|
src/azotea/gui/widgets/about.py
|
actionprojecteu/azotea-client
|
cbb6e8a5fcf06bf0686cced222eed7b807195f2c
|
[
"MIT"
] | null | null | null |
src/azotea/gui/widgets/about.py
|
actionprojecteu/azotea-client
|
cbb6e8a5fcf06bf0686cced222eed7b807195f2c
|
[
"MIT"
] | null | null | null |
src/azotea/gui/widgets/about.py
|
actionprojecteu/azotea-client
|
cbb6e8a5fcf06bf0686cced222eed7b807195f2c
|
[
"MIT"
] | null | null | null |
# ----------------------------------------------------------------------
# Copyright (c) 2020
#
# See the LICENSE file for details
# see the AUTHORS file for authors
# ----------------------------------------------------------------------
#################################
## APPLICATION SPECIFIC WIDGETS #
#################################
#--------------------
# System wide imports
# -------------------
import math
import gettext
import tkinter as tk
from tkinter import ttk
import tkinter.filedialog
# -------------------
# Third party imports
# -------------------
import PIL
# ---------------
# Twisted imports
# ---------------
from twisted.logger import Logger
# -------------
# local imports
# -------------
from azotea.gui.widgets.contrib import ToolTip
# ----------------
# Module constants
# ----------------
# Support for internationalization
_ = gettext.gettext
NAMESPACE = 'gui'
# -----------------------
# Module global variables
# -----------------------
log = Logger(namespace=NAMESPACE)
# -----------------
# Application Class
# -----------------
class AboutDialog(tk.Toplevel):
def __init__(self, title, version, descr_path, ack_path, img_path, logos_list, *args, ncols=3, **kwargs):
super().__init__(*args, **kwargs)
self._title = title
self._version = version
self._descr_path = descr_path
self._ack_path = ack_path
self._img_path = img_path
self._logos_list = logos_list
self._ncols = ncols
self.build()
self.grab_set()
def build(self):
self.title(self._title)
# TOP superframe
top_frame = ttk.Frame(self, borderwidth=2, relief=tk.GROOVE)
top_frame.pack(side=tk.TOP, expand=True, fill=tk.X, padx=5, pady=5)
# Bottom frame
bottom_frame = ttk.Frame(self, borderwidth=2, relief=tk.GROOVE)
bottom_frame.pack(side=tk.BOTTOM, expand=True, fill=tk.X, padx=5, pady=5)
# Lower Button
button = ttk.Button(bottom_frame, text=_("Close"), command=self.onCloseButton)
button.pack(side=tk.BOTTOM, padx=10, pady=5)
# Right Frame
ri_frame = ttk.Frame(top_frame)
ri_frame.pack(side=tk.RIGHT, expand=True, fill=tk.BOTH, padx=5, pady=5)
text = self._version
label = ttk.Label(ri_frame, text=text)
label.pack(side=tk.TOP, anchor=tk.W, expand=False, fill=tk.BOTH, padx=5, pady=5)
txt = self.loadText(ri_frame, self._descr_path)
txt.pack(side=tk.TOP, anchor=tk.W, expand=False, fill=tk.BOTH, padx=5, pady=5)
br_frame = ttk.LabelFrame(ri_frame, text=_("Acknowledgements"))
br_frame.pack(side=tk.RIGHT, expand=True, fill=tk.BOTH, padx=5, pady=5)
txt = self.loadText(br_frame, self._ack_path)
txt.pack(side=tk.TOP, expand=True, fill=tk.BOTH, padx=5, pady=5)
# Left Frame
le_frame = ttk.Frame(top_frame)
le_frame.pack(side=tk.LEFT, expand=True, fill=tk.BOTH, padx=5, pady=5)
img = self.loadIcon(le_frame, self._img_path)
img.pack(side=tk.TOP, expand=True, fill=tk.BOTH, padx=5, pady=5)
# List of logos in a lower left frame with grid
ll_frame = ttk.Frame(le_frame)
ll_frame.pack(side=tk.TOP, expand=True, fill=tk.BOTH, padx=5, pady=5)
nrows = math.ceil(len(self._logos_list)/self._ncols)
infoiter = iter(self._logos_list)
for row in range(nrows):
for col in range(self._ncols):
try:
tip, path = next(infoiter)
img = self.loadIcon(ll_frame, path)
ToolTip(img, tip)
img.grid(row=row, column=col, padx=2, pady=2)
except StopIteration:
break
# Buttons callbacks
def onCloseButton(self):
self.destroy()
def loadIcon(self, parent, path):
img = PIL.ImageTk.PhotoImage(PIL.Image.open(path))
icon = ttk.Label(parent, image = img)
icon.photo = img
return icon
def loadText(self, parent, path):
with open(path) as fd:
text = ' '.join(fd.readlines())
txt = tk.Message(parent, justify=tk.LEFT, text=text)
return txt
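# Illustrative usage (not part of the original module); the title, version and
# paths below are placeholders:
#   root = tk.Tk()
#   about = AboutDialog('About AZOTEA', 'Version X.Y.Z', 'description.txt',
#                       'acknowledgements.txt', 'logo.png',
#                       [('Sponsor', 'sponsor_logo.png')], ncols=3)
#   root.mainloop()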
| 29.692308
| 109
| 0.552285
|
d2d312a6e7082d02c4fabc986e2736b1c5983e1d
| 1,433
|
py
|
Python
|
bot/utils.py
|
rsoorajs/keepTelegram
|
1039f164cee4a7833254764abe1020e1e7343010
|
[
"MIT"
] | 9
|
2021-01-01T19:01:02.000Z
|
2021-11-08T09:52:19.000Z
|
bot/utils.py
|
rsoorajs/keepTelegram
|
1039f164cee4a7833254764abe1020e1e7343010
|
[
"MIT"
] | 15
|
2021-07-01T19:02:18.000Z
|
2022-02-01T19:01:48.000Z
|
bot/utils.py
|
telegrambotdev/keepTelegram
|
1039f164cee4a7833254764abe1020e1e7343010
|
[
"MIT"
] | 3
|
2021-02-17T12:32:36.000Z
|
2021-11-14T14:38:15.000Z
|
"""Utils for bot"""
from datetime import datetime
# ------------ Program variable start ----------- #
status_codes = {
0: {'str': 'unready', 'reverse_str': 'ready', 'int': 1},
1: {'str': 'ready', 'reverse_str': 'unready', 'int': 0},
}
note_fields = ['header', 'text', 'time']
buttons_text = {
'get_text': 'Get list of available commands',
'add_text': 'Add a new note'}
# ------------ Program variables end ------------ #
# ------------ Program functions start ---------- #
def note_template(data):
"""Create note template"""
return f"""
<strong>Header</strong>: <i>{data[1]}</i>
<strong>Text</strong>: <i>{data[2]}</i>
<strong>Status</strong>: <i>{status_codes[data[3]].get('str')}</i>
<strong>Due time</strong>: <i>{data[4]}</i>
"""
def statistics_template(data):
"""Create statistics template"""
return f"""
\N{memo} Number of <strong>all</strong> notes: <i>{data.get('all_num')}</i>
\N{cross mark} Number of <strong>unready</strong> notes: <i>{data.get('unready_num')}</i>
\N{check mark} Number of <strong>ready</strong> notes: <i>{data.get('ready_num')}</i>
"""
def get_time_obj(date_time_str):
"""Check if date format is correct"""
try:
date_time_obj = datetime.strptime(date_time_str, '%d/%m/%y %H:%M:%S')
return date_time_obj
except ValueError as error:
print(f'Error: {error}')
return None
# ------------ Program functions end ------------ #
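# Illustrative usage (not part of the original module): get_time_obj expects
# day/month/year hour:minute:second.
if __name__ == '__main__':
    print(get_time_obj('31/12/21 23:59:00'))  # -> 2021-12-31 23:59:00
    print(get_time_obj('2021-12-31'))         # -> prints an error and returns None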
| 31.844444
| 89
| 0.586881
|
ca43f43d224f5c487bcbac7ffb5a253c4d783ff0
| 3,678
|
py
|
Python
|
src/Python/VisualizationAlgorithms/SpikeFran.py
|
sankhesh/vtk-examples
|
2d50e847ad62ce0eb71b66c029ad8abb302cd39f
|
[
"Apache-2.0"
] | null | null | null |
src/Python/VisualizationAlgorithms/SpikeFran.py
|
sankhesh/vtk-examples
|
2d50e847ad62ce0eb71b66c029ad8abb302cd39f
|
[
"Apache-2.0"
] | null | null | null |
src/Python/VisualizationAlgorithms/SpikeFran.py
|
sankhesh/vtk-examples
|
2d50e847ad62ce0eb71b66c029ad8abb302cd39f
|
[
"Apache-2.0"
] | 1
|
2022-02-16T08:20:41.000Z
|
2022-02-16T08:20:41.000Z
|
#!/usr/bin/env python
import vtk
def main():
fileName = get_program_parameters()
colors = vtk.vtkNamedColors()
fran = vtk.vtkPolyDataReader()
fran.SetFileName(fileName)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(fran.GetOutputPort())
normals.FlipNormalsOn()
franMapper = vtk.vtkPolyDataMapper()
franMapper.SetInputConnection(normals.GetOutputPort())
franActor = vtk.vtkActor()
franActor.SetMapper(franMapper)
franActor.GetProperty().SetColor(colors.GetColor3d('Flesh'))
# We subsample the dataset because we want to glyph just a subset of
# the points. Otherwise the display is cluttered and cannot be easily
    # read. The RandomModeOn and SetOnRatio combine to randomly select one out
# of every 10 points in the dataset.
#
ptMask = vtk.vtkMaskPoints()
ptMask.SetInputConnection(normals.GetOutputPort())
ptMask.SetOnRatio(10)
ptMask.RandomModeOn()
# In this case we are using a cone as a glyph. We transform the cone so
# its base is at 0,0,0. This is the point where glyph rotation occurs.
cone = vtk.vtkConeSource()
cone.SetResolution(6)
transform = vtk.vtkTransform()
transform.Translate(0.5, 0.0, 0.0)
transformF = vtk.vtkTransformPolyDataFilter()
transformF.SetInputConnection(cone.GetOutputPort())
transformF.SetTransform(transform)
# vtkGlyph3D takes two inputs: the input point set (SetInputConnection)
# which can be any vtkDataSet and the glyph (SetSourceConnection) which
# must be a vtkPolyData. We are interested in orienting the glyphs by the
# surface normals that we previously generated.
glyph = vtk.vtkGlyph3D()
glyph.SetInputConnection(ptMask.GetOutputPort())
glyph.SetSourceConnection(transformF.GetOutputPort())
glyph.SetVectorModeToUseNormal()
glyph.SetScaleModeToScaleByVector()
glyph.SetScaleFactor(0.004)
spikeMapper = vtk.vtkPolyDataMapper()
spikeMapper.SetInputConnection(glyph.GetOutputPort())
spikeActor = vtk.vtkActor()
spikeActor.SetMapper(spikeMapper)
spikeActor.GetProperty().SetColor(colors.GetColor3d('Emerald_Green'))
# Create the RenderWindow, Renderer and Interactor.
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size.
#
ren1.AddActor(franActor)
ren1.AddActor(spikeActor)
renWin.SetSize(640, 480)
renWin.SetWindowName('SpikeFran')
ren1.SetBackground(colors.GetColor3d('SlateGray'))
# Render the image.
#
renWin.Render()
ren1.GetActiveCamera().Zoom(1.4)
ren1.GetActiveCamera().Azimuth(110)
renWin.Render()
iren.Start()
def get_program_parameters():
import argparse
description = 'This example demonstrates the use of glyphing.'
epilogue = '''
We also use a mask filter to select a subset of points to glyph.
About the data file:
This originally was a Cyberware laser digitizer scan
of Fran J.'s face. Surface normals are generated based on local geometry
(i.e., the polygon normals surrounding each point are averaged). We flip
the normals because we want them to point out from Fran's face.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename', help='fran_cut.vtk.')
args = parser.parse_args()
return args.filename
if __name__ == '__main__':
main()
| 31.982609
| 90
| 0.712616
|
ee049930d7cd3e1d826d9ae8e2fe4f88e95d787e
| 1,328
|
py
|
Python
|
api/ansible_api/serializers/project.py
|
240325184/KubeOperator
|
777774050b236abf938a5a9ef505124c26e4916e
|
[
"Apache-2.0"
] | 3
|
2019-11-29T03:49:08.000Z
|
2020-07-29T02:52:51.000Z
|
api/ansible_api/serializers/project.py
|
240325184/KubeOperator
|
777774050b236abf938a5a9ef505124c26e4916e
|
[
"Apache-2.0"
] | 27
|
2021-05-05T02:51:26.000Z
|
2022-01-04T21:30:21.000Z
|
api/ansible_api/serializers/project.py
|
240325184/KubeOperator
|
777774050b236abf938a5a9ef505124c26e4916e
|
[
"Apache-2.0"
] | 1
|
2020-07-06T04:53:51.000Z
|
2020-07-06T04:53:51.000Z
|
# -*- coding: utf-8 -*-
#
import re
from rest_framework import serializers
from django.core.validators import RegexValidator
from ..models import Project
__all__ = [
"ProjectSerializer"
]
class ProjectSerializer(serializers.ModelSerializer):
options = serializers.DictField(required=False, default={})
validated_options = ('forks', 'timeout')
class Meta:
model = Project
fields = [
'id', 'name', 'options', 'comment',
'created_by', 'date_created'
]
read_only_fields = ('id', 'created_by', 'date_created')
def validate_options(self, values):
for k in values:
if k not in self.validated_options:
raise serializers.ValidationError(
"Option {} not in {}".format(k, self.validated_options)
)
return values
    # Overridden because of an issue with the DRF slug field,
    # see https://github.com/encode/django-rest-framework/pull/6167/
def get_fields(self):
fields = super().get_fields()
name_field = fields.get('name')
for validator in name_field.validators:
if isinstance(validator, RegexValidator):
if validator.regex.pattern == r'^[-a-zA-Z0-9_]+$':
validator.regex = re.compile(r'^[-\w]+\Z')
return fields
| 28.255319
| 75
| 0.601657
|
3c7764aadb8a1c7b984a886dde6dc14171f47671
| 959
|
py
|
Python
|
file_encrypter/file_encrypter.py
|
anshul2807/Automation-scripts
|
1830437fc9cf5f97b1f5f194a704fb247849ef09
|
[
"MIT"
] | 496
|
2020-10-07T15:45:34.000Z
|
2022-03-29T16:40:30.000Z
|
file_encrypter/file_encrypter.py
|
anshul2807/Automation-scripts
|
1830437fc9cf5f97b1f5f194a704fb247849ef09
|
[
"MIT"
] | 550
|
2020-10-07T15:31:53.000Z
|
2022-03-20T22:00:38.000Z
|
file_encrypter/file_encrypter.py
|
anshul2807/Automation-scripts
|
1830437fc9cf5f97b1f5f194a704fb247849ef09
|
[
"MIT"
] | 388
|
2020-10-07T15:45:21.000Z
|
2022-03-27T14:54:46.000Z
|
import pyAesCrypt
import sys
import re
bufferSize = 64 * 1024
def check_password(password):
"""
    Check the strength of a password.
"""
regex = re.compile('[@_!#$%^&*()<>?/\\|}{~:]')
t1 = len(password) >= 8
t2 = not (regex.search(password) is None)
t3 = any(c.islower() for c in password)
t4 = any(c.isupper() for c in password)
t5 = any(c.isdigit() for c in password)
if t1 and t2 and t3 and t4 and t5:
return True
else:
return False
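# Illustrative examples (not part of the original script):
#   check_password('Abcdef1!')  -> True  (>= 8 chars, upper, lower, digit, symbol)
#   check_password('abcdefgh')  -> False (no upper-case letter, digit or symbol)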
password = input("password for encrypting : ")
password_strength = check_password(password)
while not password_strength:
print("WEAK Password")
print("Please enter a strong password")
password = input("password for encrypting : ")
password_strength = check_password(password)
pyAesCrypt.encryptFile(sys.argv[1], f"{sys.argv[1]}.aes", password, bufferSize)
print(f"FILE HAS BEEN ENCRYPTED SUCCESSFULLY. STORED IN {sys.argv[1]}.aes")
| 25.918919
| 79
| 0.661105
|
9267600365d9f9fda316fba3f05572d7efd4dd64
| 522
|
py
|
Python
|
tradeaccounts/urls.py
|
bizeasy17/investtrack
|
3840948896573f3906a5df80ea80859a492f4133
|
[
"MIT"
] | null | null | null |
tradeaccounts/urls.py
|
bizeasy17/investtrack
|
3840948896573f3906a5df80ea80859a492f4133
|
[
"MIT"
] | 3
|
2021-07-15T13:23:28.000Z
|
2021-12-09T03:32:16.000Z
|
tradeaccounts/urls.py
|
bizeasy17/investtrack
|
3840948896573f3906a5df80ea80859a492f4133
|
[
"MIT"
] | 1
|
2021-08-19T14:42:59.000Z
|
2021-08-19T14:42:59.000Z
|
from . import views
from django.urls import path, re_path
app_name = 'trade_account'
urlpatterns = [
path('',
views.TradeAccountsHomeView.as_view(), name='index'),
path('create/',
views.create_tradeaccount, name='create'),
path('comments/<ts_code>/<position_id>/',
views.position_comments, name='comments'),
# path('/positions/',
# views.positions, name='all_positions'),
# path('/<account_id>/position/',
# views.position, name='position_by_account'),
]
| 30.705882
| 62
| 0.639847
|
be492e22e38027f342c10e35a7fe19ed4cf039b5
| 416
|
py
|
Python
|
reports/migrations/0014_report_keyword.py
|
Daniel-Hoerauf/cs3240-f16-team11
|
0377bcb8ede9599c04daf7683b065abb2a47f893
|
[
"MIT"
] | null | null | null |
reports/migrations/0014_report_keyword.py
|
Daniel-Hoerauf/cs3240-f16-team11
|
0377bcb8ede9599c04daf7683b065abb2a47f893
|
[
"MIT"
] | null | null | null |
reports/migrations/0014_report_keyword.py
|
Daniel-Hoerauf/cs3240-f16-team11
|
0377bcb8ede9599c04daf7683b065abb2a47f893
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('reports', '0013_report_file_encrypted'),
]
operations = [
migrations.AddField(
model_name='report',
name='keyword',
field=models.CharField(default='', max_length=32),
),
]
| 20.8
| 62
| 0.605769
|
c5d161575be0e6ab71b43ca4d137ce2600ded339
| 498
|
py
|
Python
|
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_09_30_preview/version.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_09_30_preview/version.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_09_30_preview/version.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
VERSION = "2019-04-30"
| 35.571429
| 76
| 0.524096
|
3deefc6d3858b5b59d6884cc927e28b0c35e3935
| 11,960
|
py
|
Python
|
scripts/TestHarness/testers/RavenFramework.py
|
dgarrett622/raven
|
f36cc108f7500b0e2717df4832b69b801b43960d
|
[
"Apache-2.0"
] | null | null | null |
scripts/TestHarness/testers/RavenFramework.py
|
dgarrett622/raven
|
f36cc108f7500b0e2717df4832b69b801b43960d
|
[
"Apache-2.0"
] | null | null | null |
scripts/TestHarness/testers/RavenFramework.py
|
dgarrett622/raven
|
f36cc108f7500b0e2717df4832b69b801b43960d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RavenFramework is a tool to test raven inputs.
"""
from __future__ import absolute_import
import os
import subprocess
import sys
import platform
from Tester import Tester
import OrderedCSVDiffer
import UnorderedCSVDiffer
import XMLDiff
import TextDiff
import ExistsDiff
import RAVENImageDiff
# Set this outside the class because the framework directory is constant for
# each instance of this Tester, and in addition, there is a problem with the
# path by the time you call it in __init__ that causes it to think its absolute
# path is somewhere under tests/framework.
# Be aware that if this file changes its location, this variable should also be
# changed.
myDir = os.path.dirname(os.path.realpath(__file__))
RAVENDIR = os.path.abspath(os.path.join(myDir, '..', '..', '..', 'ravenframework'))
RAVENROOTDIR = os.path.abspath(os.path.join(myDir, '..', '..', '..'))
#add path so framework is found.
sys.path.append(os.path.abspath(os.path.dirname(RAVENDIR)))
#Need to add the directory for AMSC for doing module checks.
os.environ["PYTHONPATH"] = os.path.join(RAVENDIR, '..', 'install') +\
os.pathsep + os.environ.get("PYTHONPATH", "")
scriptDir = os.path.abspath(os.path.join(RAVENDIR, '..', 'scripts'))
sys.path.append(scriptDir)
import library_handler
sys.path.pop()
_missingModules, _notQAModules = library_handler.checkLibraries()
_checkVersions = library_handler.checkVersions()
class RavenFramework(Tester):
"""
RavenFramework is the class to use for testing standard raven inputs.
"""
@staticmethod
def get_valid_params():
"""
Returns the parameters that can be used for this class.
@ In, None
@ Out, params, _ValidParameters, return the parameters.
"""
params = Tester.get_valid_params()
params.add_required_param('input', "The input file to use for this test.")
params.add_param('output', '', "List of output files that the input should create.")
params.add_param('csv', '', "List of csv files to check")
params.add_param('UnorderedCsv', '', "List of unordered csv files to check")
params.add_param('xml', '', "List of xml files to check")
params.add_param('UnorderedXml', '', "List of unordered xml files to check")
params.add_param('xmlopts', '', "Options for xml checking")
params.add_param('text', '', "List of generic text files to check")
params.add_param('comment', '-20021986', "Character or string denoting "+
"comments, all text to the right of the symbol will be "+
"ignored in the diff of text files")
params.add_param('image', '', "List of image files to check")
params.add_param('rel_err', '', 'Relative Error for csv files or floats in xml ones')
params.add_param('required_executable', '', 'Skip test if this executable is not found')
params.add_param('required_libraries', '', 'Skip test if any of these libraries are not found')
params.add_param('minimum_library_versions', '',
'Skip test if the library listed is below the supplied'+
' version (e.g. minimum_library_versions = \"name1 version1 name2 version2\")')
params.add_param('skip_if_env', '', 'Skip test if this environmental variable is defined')
params.add_param('skip_if_OS', '', 'Skip test if the operating system defined')
params.add_param('test_interface_only', False,
                     'Test the interface only (without running the driven code)')
params.add_param('check_absolute_value', False,
                     'if true, the values are compared to the tolerance '+
                     'directly, instead of relatively.')
params.add_param('zero_threshold', sys.float_info.min*4.0,
                     'it represents the value below which a float is '+
'considered zero (XML comparison only)')
params.add_param('remove_whitespace', False,
'Removes whitespace before comparing xml node text if True')
params.add_param('remove_unicode_identifier', False,
                     'if true, then remove u in front of a single quote')
params.add_param('interactive', False,
'if true, then RAVEN will be run with interactivity enabled.')
params.add_param('python3_only', False, 'if true, then only use with Python3')
params.add_param('ignore_sign', False, 'if true, then only compare the absolute values')
return params
def get_command(self):
"""
Gets the raven command to run this test.
@ In, None
@ Out, get_command, string, command to run.
"""
ravenflag = ''
if self.specs['test_interface_only']:
ravenflag += ' interfaceCheck '
if self.specs['interactive']:
ravenflag += ' interactiveCheck '
return self._get_python_command() + " " + self.driver + " " + ravenflag + self.specs["input"]
def __make_differ(self, specName, differClass, extra=None):
"""
This adds a differ if the specName has files.
@ In, specName, string of the list of files to use with the differ.
@ In, differClass, subclass of Differ, for use with the files.
@ In, extra, dictionary, extra parameters
@ Out, None
"""
if len(self.specs[specName]) == 0:
#No files, so quit
return
differParams = dict(self.specs)
differParams["output"] = self.specs[specName]
differParams["type"] = differClass.__name__
if extra is not None:
differParams.update(extra)
self.add_differ(differClass(specName, differParams, self.get_test_dir()))
def __init__(self, name, params):
Tester.__init__(self, name, params)
self.all_files = []
self.__make_differ('output', ExistsDiff.Exists)
self.__make_differ('csv', OrderedCSVDiffer.OrderedCSV)
self.__make_differ('UnorderedCsv', UnorderedCSVDiffer.UnorderedCSV)
self.__make_differ('xml', XMLDiff.XML, {"unordered":False})
self.__make_differ('UnorderedXml', XMLDiff.XML, {"unordered":True})
self.__make_differ('text', TextDiff.Text)
self.__make_differ('image', RAVENImageDiff.ImageDiffer)
self.required_executable = self.specs['required_executable']
self.required_libraries = self.specs['required_libraries'].split(' ')\
if len(self.specs['required_libraries']) > 0 else []
self.minimum_libraries = self.specs['minimum_library_versions'].split(' ')\
if len(self.specs['minimum_library_versions']) > 0 else []
self.required_executable = self.required_executable.replace("%METHOD%",
os.environ.get("METHOD", "opt"))
self.specs['scale_refine'] = False
self.driver = os.path.join(RAVENROOTDIR, 'raven_framework.py')
def check_runnable(self):
"""
Checks if this test can run.
@ In, None
@ Out, check_runnable, boolean, if True can run this test.
"""
# remove tests based on skipping criteria
## required module is missing
if _missingModules:
self.set_fail('skipped (Missing python modules: '+" ".join([m[0] for m in _missingModules])+
" PYTHONPATH="+os.environ.get("PYTHONPATH", "")+')')
return False
## required module is present, but too old
if _notQAModules and _checkVersions:
self.set_fail('skipped (Incorrectly versioned python modules: ' +
" ".join(['required {}-{}, but found {}'.format(*m) for m in _notQAModules]) +
" PYTHONPATH="+os.environ.get("PYTHONPATH", "")+')')
return False
    ## an environment variable value causes a skip
if len(self.specs['skip_if_env']) > 0:
envVar = self.specs['skip_if_env']
if envVar in os.environ:
self.set_skip('skipped (found environmental variable "'+envVar+'")')
return False
## OS
if len(self.specs['skip_if_OS']) > 0:
skipOs = [x.strip().lower() for x in self.specs['skip_if_OS'].split(',')]
# get simple-name platform (options are Linux, Windows, Darwin, or SunOS that I've seen)
currentOs = platform.system().lower()
# replace Darwin with more expected "mac"
if currentOs == 'darwin':
currentOs = 'mac'
if currentOs in skipOs:
self.set_skip('skipped (OS is "{}")'.format(currentOs))
return False
for lib in self.required_libraries:
found, _, _ = library_handler.checkSingleLibrary(lib)
if not found:
self.set_skip('skipped (Unable to import library: "{}")'.format(lib))
return False
if self.specs['python3_only'] and not library_handler.inPython3():
self.set_skip('Python 3 only')
return False
i = 0
if len(self.minimum_libraries) % 2:
self.set_skip('skipped (libraries are not matched to versions numbers: '
+str(self.minimum_libraries)+')')
return False
while i < len(self.minimum_libraries):
libraryName = self.minimum_libraries[i]
libraryVersion = self.minimum_libraries[i+1]
found, _, actualVersion = library_handler.checkSingleLibrary(libraryName, version='check')
if not found:
self.set_skip('skipped (Unable to import library: "'+libraryName+'")')
return False
if library_handler.parseVersion(actualVersion) < \
library_handler.parseVersion(libraryVersion):
self.set_skip('skipped (Outdated library: "'+libraryName+'" needed version '+str(libraryVersion)+' but had version '+str(actualVersion)+')')
return False
i += 2
if len(self.required_executable) > 0 and \
not os.path.exists(self.required_executable):
self.set_skip('skipped (Missing executable: "'+self.required_executable+'")')
return False
try:
if len(self.required_executable) > 0 and \
subprocess.call([self.required_executable], stdout=subprocess.PIPE) != 0:
self.set_skip('skipped (Failing executable: "'+self.required_executable+'")')
return False
except Exception as exp:
self.set_skip('skipped (Error when trying executable: "'
+self.required_executable+'")'+str(exp))
return False
filenameSet = set()
duplicateFiles = []
for filename in self.__get_created_files():
if filename not in filenameSet:
filenameSet.add(filename)
else:
duplicateFiles.append(filename)
if len(duplicateFiles) > 0:
self.set_skip('[incorrect test] duplicated files specified: '+
" ".join(duplicateFiles))
return False
return True
def __get_created_files(self):
"""
Returns all the files used by this test that need to be created
by the test. Note that they will be deleted at the start of running
the test.
@ In, None
@ Out, createdFiles, [str], list of files created by the test.
"""
runpath = self.get_test_dir()
removeFiles = self.get_differ_remove_files()
return removeFiles+list(os.path.join(runpath, file) for file in self.all_files)
def prepare(self):
"""
Get the test ready to run by removing files that should be created.
@ In, None
@ Out, None
"""
for filename in self.__get_created_files():
if os.path.exists(filename):
os.remove(filename)
def process_results(self, _):
"""
Check to see if the test has passed.
@ In, ignored, string, output of test.
@ Out, None
"""
self.set_success()
| 43.333333
| 148
| 0.666806
|
221d8ad89f9874562fa463d34b577a3c99252813
| 600
|
py
|
Python
|
handlers/users/start.py
|
vlsh1n/ChatBot_PlekhanovUniversity
|
fa28ee623a2f020b5ddddb45655444bc537f0c53
|
[
"MIT"
] | null | null | null |
handlers/users/start.py
|
vlsh1n/ChatBot_PlekhanovUniversity
|
fa28ee623a2f020b5ddddb45655444bc537f0c53
|
[
"MIT"
] | null | null | null |
handlers/users/start.py
|
vlsh1n/ChatBot_PlekhanovUniversity
|
fa28ee623a2f020b5ddddb45655444bc537f0c53
|
[
"MIT"
] | null | null | null |
from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandStart
from keyboards.default import menu
from loader import dp
# Handler that processes the /start command
@dp.message_handler(CommandStart())
async def bot_start(message: types.Message):
await message.answer(f"Привет, {message.from_user.full_name}!\n"
f"Я - чатбот VIII Школы Актива РЭУ им. Г.В.Плеханова!\n"
f"У меня ты сможешь узнать всю необходимую информацию, касаемо Школы Актива!")
await message.answer('Выберите нужную кнопку', reply_markup=menu)
| 37.5
| 103
| 0.723333
|
704ca0d87e141ac1e4d61b76781ebc5de76dbe7e
| 625
|
py
|
Python
|
codes/chapter18-3/henango/urls/resolver.py
|
uenoka/introduction-to-web-application-with-python
|
99ec28151fa8388a21c96e5d39902641961c72bb
|
[
"MIT"
] | 22
|
2020-11-04T01:57:20.000Z
|
2022-03-16T06:20:32.000Z
|
codes/chapter18-3/henango/urls/resolver.py
|
uenoka/introduction-to-web-application-with-python
|
99ec28151fa8388a21c96e5d39902641961c72bb
|
[
"MIT"
] | 17
|
2020-11-03T14:28:12.000Z
|
2022-03-26T08:04:50.000Z
|
codes/chapter18-3/henango/urls/resolver.py
|
uenoka/introduction-to-web-application-with-python
|
99ec28151fa8388a21c96e5d39902641961c72bb
|
[
"MIT"
] | 14
|
2020-11-04T02:17:49.000Z
|
2022-03-12T11:32:41.000Z
|
from typing import Callable, Optional
from henango.http.request import HTTPRequest
from henango.http.response import HTTPResponse
from urls import url_patterns
class URLResolver:
def resolve(self, request: HTTPRequest) -> Optional[Callable[[HTTPRequest], HTTPResponse]]:
"""
        Resolve the URL.
        If a URL pattern matching the request path exists, return the corresponding view;
        otherwise return None.
"""
for url_pattern in url_patterns:
match = url_pattern.match(request.path)
if match:
request.params.update(match.groupdict())
return url_pattern.view
return None
| 28.409091
| 95
| 0.6672
|
33edf3c69c7dfc30869621fd4d8e92ffad84de70
| 1,598
|
py
|
Python
|
sdk/cognitiveservices/azure-cognitiveservices-formrecognizer/azure/cognitiveservices/formrecognizer/models/extracted_token.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/cognitiveservices/azure-cognitiveservices-formrecognizer/azure/cognitiveservices/formrecognizer/models/extracted_token.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/cognitiveservices/azure-cognitiveservices-formrecognizer/azure/cognitiveservices/formrecognizer/models/extracted_token.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExtractedToken(Model):
"""Canonical representation of single extracted text.
:param text: String value of the extracted text.
:type text: str
:param bounding_box: Bounding box of the extracted text. Represents the
location of the extracted text as a pair of
cartesian co-ordinates. The co-ordinate pairs are arranged by
top-left, top-right, bottom-right and bottom-left endpoints box
with origin reference from the bottom-left of the page.
:type bounding_box: list[float]
:param confidence: A measure of accuracy of the extracted text.
:type confidence: float
"""
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'bounding_box': {'key': 'boundingBox', 'type': '[float]'},
'confidence': {'key': 'confidence', 'type': 'float'},
}
def __init__(self, **kwargs):
super(ExtractedToken, self).__init__(**kwargs)
self.text = kwargs.get('text', None)
self.bounding_box = kwargs.get('bounding_box', None)
self.confidence = kwargs.get('confidence', None)
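# Illustrative usage (not part of the original module); the values below are
# placeholders:
#   token = ExtractedToken(text='Invoice', confidence=0.97,
#                          bounding_box=[1.0, 10.0, 4.0, 10.0, 4.0, 9.0, 1.0, 9.0])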
| 38.97561
| 76
| 0.621402
|
bd8807c4c533dbc8d532c18d4049b20334617c9f
| 224
|
py
|
Python
|
app.py
|
wang2542/DevOps
|
cf268609974782a780215d8ceb6c90ff80982262
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
wang2542/DevOps
|
cf268609974782a780215d8ceb6c90ff80982262
|
[
"Apache-2.0"
] | 1
|
2022-02-14T01:46:44.000Z
|
2022-02-14T01:52:43.000Z
|
app.py
|
wang2542/DevOps
|
cf268609974782a780215d8ceb6c90ff80982262
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask
app = Flask(__name__)
COUNTER = 0
@app.route("/")
def index():
return "Hello World!"
@app.route("/counter")
def counter():
global COUNTER
COUNTER += 1
return(dict(counter=COUNTER))
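# Illustrative run (not part of the original module):
#   FLASK_APP=app.py flask run
#   GET /        -> "Hello World!"
#   GET /counter -> {"counter": 1}, incrementing on each request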
| 14.933333
| 33
| 0.651786
|
d2c8cd7fba7dd605a6a7ed619c97af6edd01ebe5
| 1,119
|
py
|
Python
|
src/cltl/face_recognition/api.py
|
leolani/cltl-face-recognition
|
b98e4791c9943b4a4a91d00e2ede5ead2d5fa61f
|
[
"MIT"
] | null | null | null |
src/cltl/face_recognition/api.py
|
leolani/cltl-face-recognition
|
b98e4791c9943b4a4a91d00e2ede5ead2d5fa61f
|
[
"MIT"
] | null | null | null |
src/cltl/face_recognition/api.py
|
leolani/cltl-face-recognition
|
b98e4791c9943b4a4a91d00e2ede5ead2d5fa61f
|
[
"MIT"
] | null | null | null |
import abc
import dataclasses
from typing import List, Optional, Iterable, Tuple
import numpy as np
from cltl.backend.api.camera import Bounds
from emissor.representation.entity import Gender
@dataclasses.dataclass
class Face:
"""
Information about a Face.
Includes a vector representation of the face and optional meta information.
"""
# TODO switch to np.typing.ArrayLike
embedding: np.ndarray
gender: Optional[Gender]
age: Optional[int]
class FaceDetector(abc.ABC):
"""
Detect faces in an image.
"""
def detect(self, image: np.ndarray) -> Tuple[Iterable[Face], Iterable[Bounds]]:
"""
Detect faces in an image.
Parameters
----------
image : np.ndarray
The binary image.
Returns
-------
Iterable[Face]
The faces detected in the image.
Iterable[Bounds]
The positions of the detected faces in the image.
"""
raise NotImplementedError()
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
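# Illustrative sketch (not part of the original module): a minimal FaceDetector
# subclass that reports no faces, showing how the interface is meant to be
# implemented. The class name is hypothetical.
class NullFaceDetector(FaceDetector):
    def detect(self, image: np.ndarray) -> Tuple[Iterable[Face], Iterable[Bounds]]:
        # No detection logic; always return an empty result.
        return [], []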
| 22.38
| 83
| 0.623771
|
5d825b7649293ff9c6241b2d353f07292534dbd5
| 649
|
py
|
Python
|
blocketwatch/__init__.py
|
chip2n/blocket-watch
|
988cb7c6eb4aad0f4655f9ce7dcb1dc75945a4ed
|
[
"Apache-2.0"
] | null | null | null |
blocketwatch/__init__.py
|
chip2n/blocket-watch
|
988cb7c6eb4aad0f4655f9ce7dcb1dc75945a4ed
|
[
"Apache-2.0"
] | null | null | null |
blocketwatch/__init__.py
|
chip2n/blocket-watch
|
988cb7c6eb4aad0f4655f9ce7dcb1dc75945a4ed
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# coding=UTF8
#
# Copyright 2013 Andreas Arvidsson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from blocketwatch import main
| 34.157895
| 74
| 0.75963
|
3693ec784eb9f7578e5dc83cd1038410dad5e23e
| 1,390
|
py
|
Python
|
chrome/test/enterprise/e2e/policy/webprotect_file_download/webprotect_file_download.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
chrome/test/enterprise/e2e/policy/webprotect_file_download/webprotect_file_download.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
chrome/test/enterprise/e2e/policy/webprotect_file_download/webprotect_file_download.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from chrome_ent_test.infra.core import category, environment, before_all, test
from infra import ChromeEnterpriseTestCase
@category("chrome_only")
@environment(file="../webprotect_test.asset.textpb")
class WebProtectFileDownloadTest(ChromeEnterpriseTestCase):
"""Test the WebProtect client behaviour.
  This is the set of E2E test cases for testing Chrome download behavior
  when WebProtect is enabled. The purpose of these tests is to catch Chrome
  client UI regressions.
"""
@before_all
def setup(self):
self.InstallChrome('webprotect-1')
self.EnableUITest('webprotect-1')
@test
def test_malware_scan_download(self):
self.SetPolicy('win2016-dc', r'CloudManagementEnrollmentToken',
'856f301d-cfda-414d-97a6-1bfb46d94293', 'String')
instance_name = 'webprotect-1'
self.RunCommand(instance_name, 'gpupdate /force')
local_dir = os.path.dirname(os.path.abspath(__file__))
output = self.RunUITest(
instance_name,
os.path.join(local_dir, 'webprotect_file_download_webdriver.py'))
self.assertIn('Encrypted blocked', output)
self.assertIn('Large file blocked', output)
self.assertIn('Unknown malware scanning', output)
| 34.75
| 78
| 0.741007
|
d0d6ad4ab24f53c62a3876ddce604dae6804e771
| 5,102
|
py
|
Python
|
ncc/eval/summarization/smoothed_bleu.py
|
iwangyuezhang/naturalcc
|
e9d9b4a296b61199fc35779b062db2205935a608
|
[
"MIT"
] | 1
|
2022-03-30T14:45:42.000Z
|
2022-03-30T14:45:42.000Z
|
ncc/eval/summarization/smoothed_bleu.py
|
hrshy0629/naturalcc
|
9c3329dd8387c8242deb52bf590ebe3ac795f8de
|
[
"MIT"
] | null | null | null |
ncc/eval/summarization/smoothed_bleu.py
|
hrshy0629/naturalcc
|
9c3329dd8387c8242deb52bf590ebe3ac795f8de
|
[
"MIT"
] | null | null | null |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python implementation of BLEU and smooth-BLEU.
This module provides a Python implementation of BLEU and smooth-BLEU.
Smooth BLEU is computed following the method outlined in the paper:
Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
evaluation metrics for machine translation. COLING 2004.
"""
import collections
import math
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
    The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_smoothed_bleu(reference_corpus, translation_corpus, max_order=4, smooth=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus, translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for order in range(1, max_order + 1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order - 1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
def _bleu(ref_file, trans_file, subword_option=None):
max_order = 4
smooth = True
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with open(reference_filename, encoding='utf-8') as fh:
reference_text.append(fh.readlines())
per_segment_references = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference_list.append(reference.strip().split())
per_segment_references.append(reference_list)
translations = []
with open(trans_file, encoding='utf-8') as fh:
for line in fh:
translations.append(line.strip().split())
    bleu_score, _, _, _, _, _ = compute_smoothed_bleu(per_segment_references, translations, max_order, smooth)
return round(100 * bleu_score, 2)
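# Illustrative usage (not part of the original module): smoothed BLEU for a
# single toy segment with one reference; the sentences are placeholders.
if __name__ == '__main__':
    references = [[['the', 'cat', 'sat', 'on', 'the', 'mat']]]
    translations = [['the', 'cat', 'is', 'on', 'the', 'mat']]
    bleu, precisions, bp, ratio, trans_len, ref_len = compute_smoothed_bleu(
        references, translations)
    print('smoothed BLEU: {:.4f}'.format(bleu))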
| 39.859375
| 101
| 0.662681
|
f9560932d4885c0ac16585aefdafca8a0b807956
| 205
|
py
|
Python
|
23_Aug_2021_re-exam/project_astro/astronaut/meteorologist.py
|
vasetousa/OOP
|
e4fedc497dd149c9800613ea11846e0e770d122c
|
[
"MIT"
] | null | null | null |
23_Aug_2021_re-exam/project_astro/astronaut/meteorologist.py
|
vasetousa/OOP
|
e4fedc497dd149c9800613ea11846e0e770d122c
|
[
"MIT"
] | null | null | null |
23_Aug_2021_re-exam/project_astro/astronaut/meteorologist.py
|
vasetousa/OOP
|
e4fedc497dd149c9800613ea11846e0e770d122c
|
[
"MIT"
] | null | null | null |
from project_astro.astronaut.astronaut import Astronaut
class Meteorologist(Astronaut):
def __init__(self, name):
super().__init__(name, 90)
def breathe(self):
self.oxygen -= 15
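# Editor's illustrative sketch (not part of the original file): a Meteorologist
# starts with 90 units of oxygen and spends 15 per breath, assuming the base
# Astronaut class stores the second constructor argument as `self.oxygen`:
#   met = Meteorologist("Dan")
#   met.breathe()   # oxygen: 90 -> 75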
| 20.5
| 55
| 0.687805
|
799a677237acd301922d64d0572901167a81052c
| 732
|
py
|
Python
|
Unit_4/Data_Array_Manipulation.py
|
coffeelabor/CIS289_Reed_James
|
6d1bc126d6b50411f2bad1d65cfeebd47b68a6ff
|
[
"MIT"
] | null | null | null |
Unit_4/Data_Array_Manipulation.py
|
coffeelabor/CIS289_Reed_James
|
6d1bc126d6b50411f2bad1d65cfeebd47b68a6ff
|
[
"MIT"
] | null | null | null |
Unit_4/Data_Array_Manipulation.py
|
coffeelabor/CIS289_Reed_James
|
6d1bc126d6b50411f2bad1d65cfeebd47b68a6ff
|
[
"MIT"
] | null | null | null |
'''
/***************************************************************
* Name: Pandas Data and Array Manipulation Program
* Author: Reed James
* Created: 22 Sept 2021
* Course: CIS 289 - Python
* Version: Python 3.8.2
* OS: Windows 10
* Copyright: This is my own original work based on
* specifications issued by our instructor
* Description:
* Input:
* Output:
* Academic Honesty: I attest that this is my original work.
* I have not used unauthorized source code, either modified or
* unmodified. I have not given other fellow student(s) access to my program.
***************************************************************/
'''
import pandas as pd
if __name__ == "__main__":
pass
| 30.5
| 76
| 0.556011
|
2f2b382066cf85165208a5e3a9e7ca4e4e879687
| 1,784
|
py
|
Python
|
usuarios/models.py
|
MarcosBB/E-commerce
|
2a7c2bfd4512eb212eeec3bd49c65c70a65e6593
|
[
"MIT"
] | 2
|
2021-07-27T19:27:15.000Z
|
2021-08-16T18:26:22.000Z
|
usuarios/models.py
|
MarcosBB/E-commerce
|
2a7c2bfd4512eb212eeec3bd49c65c70a65e6593
|
[
"MIT"
] | null | null | null |
usuarios/models.py
|
MarcosBB/E-commerce
|
2a7c2bfd4512eb212eeec3bd49c65c70a65e6593
|
[
"MIT"
] | null | null | null |
from django.db import models
import uuid
from stdimage.models import StdImageField
from django.contrib.auth.models import AbstractUser, BaseUserManager
def get_file_path(_instance, filename):
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return filename
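# Editor's illustrative note (hypothetical filename): get_file_path(None, 'avatar.png')
# returns something like 'c0ffee00-1234-....png' - the extension is kept, but the base
# name is replaced with a uuid4 so uploaded files cannot collide.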
class UsuarioManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
if not email:
raise ValueError('O nome de e-mail é obrigatório!')
email = self.normalize_email(email)
user = self.model(email=email, username=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_staff', True)
return self._create_user(email, password, **extra_fields)
class CustomUsuario(AbstractUser):
email = models.EmailField('E-mail', unique=True)
apelido = models.CharField('Apelido', max_length=100, unique=True)
nome = models.CharField('Nome', max_length=100)
sobrenome = models.CharField('Sobrenome', max_length=100)
imagem = StdImageField('Imagem', upload_to=get_file_path, variations={'thumb': {'width': 480, 'height': 480, 'crop': True}})
is_staff = models.BooleanField('Membro da equipe', default=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['apelido','nome', 'sobrenome', 'imagem']
def __str__(self):
return self.email
objects = UsuarioManager()
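# Editor's usage sketch (not part of the original file; field values are hypothetical):
#   user = CustomUsuario.objects.create_user(
#       email='ana@example.com', password='s3cret',
#       apelido='ana', nome='Ana', sobrenome='Silva', imagem=some_image_file)
#   admin = CustomUsuario.objects.create_superuser(
#       email='root@example.com', password='s3cret',
#       apelido='root', nome='Root', sobrenome='Admin', imagem=some_image_file)
# USERNAME_FIELD is 'email', so authentication uses the e-mail address instead of a
# separate username.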
| 37.166667
| 128
| 0.696749
|
51acd17d9ec1e48eabe884b82c88db8d5eeda3ea
| 265
|
py
|
Python
|
newdust/graindist/composition/__init__.py
|
eblur/newdust
|
7e843ae2604a844826606ea04c459694fdd5c178
|
[
"BSD-2-Clause"
] | 4
|
2018-02-04T19:04:01.000Z
|
2022-02-09T04:11:18.000Z
|
newdust/graindist/composition/__init__.py
|
eblur/newdust
|
7e843ae2604a844826606ea04c459694fdd5c178
|
[
"BSD-2-Clause"
] | 21
|
2017-08-15T21:13:42.000Z
|
2021-12-23T20:07:24.000Z
|
newdust/graindist/composition/__init__.py
|
eblur/newdust
|
7e843ae2604a844826606ea04c459694fdd5c178
|
[
"BSD-2-Clause"
] | 1
|
2021-01-28T18:29:12.000Z
|
2021-01-28T18:29:12.000Z
|
import os
def _find_cmfile(name):
root_path = os.path.dirname(__file__).rstrip('composition')
data_path = root_path + 'tables/'
return data_path + name
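# Editor's note (illustrative, filename hypothetical): _find_cmfile('CM_D03.dat')
# resolves to '<package root>/tables/CM_D03.dat'; the rstrip('composition') trick
# works only because this module lives in a directory literally named 'composition'.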
from .cmdrude import CmDrude
from .cmsilicate import CmSilicate
from .cmgraphite import CmGraphite
| 24.090909
| 63
| 0.758491
|
920f43c98c03f2b0a7a1286e87634b8055207966
| 1,874
|
gyp
|
Python
|
binding.gyp
|
ionagamed/node-rsvg
|
bad9eb5e0d52dc9d128ee24ce304f3091a9d2afb
|
[
"MIT"
] | 32
|
2015-10-04T03:39:54.000Z
|
2020-11-09T14:59:30.000Z
|
binding.gyp
|
ionagamed/node-rsvg
|
bad9eb5e0d52dc9d128ee24ce304f3091a9d2afb
|
[
"MIT"
] | 16
|
2016-04-04T14:57:30.000Z
|
2019-09-09T12:18:58.000Z
|
binding.gyp
|
ionagamed/node-rsvg
|
bad9eb5e0d52dc9d128ee24ce304f3091a9d2afb
|
[
"MIT"
] | 25
|
2015-10-17T09:58:01.000Z
|
2021-06-23T07:43:03.000Z
|
{
"variables": {
"GTK_Root%": "c:\\gtk",
"conditions": [
[ "OS == 'mac'", {
"pkg_env": "PKG_CONFIG_PATH=/opt/X11/lib/pkgconfig"
}, {
"pkg_env": ""
}]
]
},
"targets": [
{
"target_name": "rsvg",
"sources": [
"src/Rsvg.cc",
"src/Enums.cc",
"src/Autocrop.cc"
],
"include_dirs": [
"<!(node -e \"require('nan')\")"
],
"variables": {
"packages": "librsvg-2.0 cairo-png cairo-pdf cairo-svg",
"conditions": [
[ "OS!='win'", {
"libraries": "<!(<(pkg_env) pkg-config --libs-only-l <(packages))",
"ldflags": "<!(<(pkg_env) pkg-config --libs-only-L --libs-only-other <(packages))",
"cflags": "<!(<(pkg_env) pkg-config --cflags <(packages))"
}, { # else OS!='win'
"include_dirs": "<!(<(python) tools/include_dirs.py <(GTK_Root) <(packages))"
} ]
]
},
"conditions": [
[ "OS!='mac' and OS!='win'", {
"cflags": [
"<@(cflags)",
"-std=c++0x"
],
"ldflags": [
"<@(ldflags)"
],
"libraries": [
"<@(libraries)"
],
} ],
[ "OS=='mac'", {
"xcode_settings": {
"OTHER_CFLAGS": [
"<@(cflags)"
],
"OTHER_LDFLAGS": [
"<@(ldflags)"
]
},
"libraries": [
"<@(libraries)"
],
} ],
[ "OS=='win'", {
"sources+": [
"src/win32-math.cc"
],
"include_dirs": [
"<@(include_dirs)"
],
"libraries": [
'librsvg-2.dll.a',
'glib-2.0.lib',
'gobject-2.0.lib',
'cairo.lib'
],
"msvs_settings": {
'VCCLCompilerTool': {
'AdditionalOptions': [
"/EHsc"
]
}
},
"msbuild_settings": {
"Link": {
"AdditionalLibraryDirectories": [
"<(GTK_Root)\\lib"
],
"ImageHasSafeExceptionHandlers": "false"
}
}
} ]
]
}
]
}
| 19.93617
| 89
| 0.437567
|
e2ef6413182e0e3d5e3095aab987250265f0a28f
| 6,904
|
py
|
Python
|
localizedFunc.py
|
FisherEat/python_ios_projects
|
1d1bba34d31ff31b732b5ae573c0eca0a4a0fb7b
|
[
"MIT"
] | null | null | null |
localizedFunc.py
|
FisherEat/python_ios_projects
|
1d1bba34d31ff31b732b5ae573c0eca0a4a0fb7b
|
[
"MIT"
] | null | null | null |
localizedFunc.py
|
FisherEat/python_ios_projects
|
1d1bba34d31ff31b732b5ae573c0eca0a4a0fb7b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import sys
zhPattern = re.compile(u'[\u4e00-\u9fa5]+')
strPattern = re.compile(r'@\"(.*?)(?<!\\)\"')
def cur_file_dir():
    # directory the Python interpreter was launched from
path = sys.path[0]
    # change to the parent directory
if os.path.isdir(path):
# return path
return os.path.dirname(path)
elif os.path.isfile(path):
# return os.path.dirname(path)
dir = os.path.dirname(path)
return os.path.dirname(dir)
def outPutlogFile(str):
strings = restringTostring(str)
rootPath = cur_file_dir()
logfile = os.path.join(rootPath, 'log.txt')
with open(logfile, 'a', encoding='utf-8') as f:
f.write(strings)
# localStingPath = os.path.join(cur_file_dir(), 'Localizable.strings')
localStingPath = os.path.join(cur_file_dir(), 'SmartHomeV6/zh-Hans.lproj/Localizable.strings')
def findkeyforstring(string):
key = ''
restr = r'"(\w+)"\s*=\s*"%s";'%(string)
with open(localStingPath, 'r', encoding='utf-8') as f:
text = f.read()
match = re.search(restr, text)
if match != None:
key = match.group(1)
return key
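# Editor's illustrative sketch (example entry is hypothetical): findkeyforstring
# expects Localizable.strings entries of the form
#   "home_title" = "首页";
# so findkeyforstring('首页') would return 'home_title'.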
def findstringforkey(en):
string = None
restr = r'"%s"\s*=\s*"(.*)";'%(en)
with open(localStingPath, 'r', encoding='utf-8') as f:
text = f.read()
match = re.search(restr, text)
if match != None:
string = match.group(1)
return string
global isneedwrite
global tempfilelines
def dealwithChinese(string, line):
global tempfilelines
global isneedwrite
key = findkeyforstring(string)
if key == '':
if re.search(r'NSLog\s*\(\s*@"%s"'%string, line) != None:
string = 'NSLog'.ljust(30) + string
# outPutlogFile(string + "\n")
else:
outPutlogFile('not find Key'.ljust(30) + string + "\n")
print(string)
else:
isneedwrite = 1
if re.search(r'NSLog\s*\(\s*@"%s"'%string, line):
print('pass')
elif re.search(r'NSLocalizedString\s*\(\s*@"%s"\s*,\s*([\w\s]+)\s*\)'%string, line) != None:
temp = re.sub(r'NSLocalizedString\s*\(\s*@"%s"\s*,\s*([\w\s]+)\s*\)'%string, r'NSLocalizedString(@"%s", \1)'%key, line)
tempfilelines.pop()
tempfilelines.append(temp)
outPutlogFile(string.ljust(30)+ "modify : " +key + "\n")
elif re.search(r'@"%s"'%string, line) != None:
temp = re.sub(r'@"%s"' % string, 'NSLocalizedString(@"%s", nil)' % key, line)
tempfilelines.pop()
tempfilelines.append(temp)
outPutlogFile(string.ljust(30) + "modify : " + key + "\n")
else:
print('not need')
def dealwithEnglish(string, line):
if re.search(r'NSLocalizedString\s*\(\s*@"%s"\s*,\s*([\w\s]+)\s*\)' % string, line) != None:
result = findstringforkey(string)
if result == None:
outPutlogFile(string + "\n")
def stringTorestring(string):
string = string.replace('\\', '\\\\')
string = string.replace('.', '\.')
string = string.replace('(', '\(')
string = string.replace(')', '\)')
string = string.replace('[', '\[')
string = string.replace(']', '\]')
string = string.replace('{', '\{')
string = string.replace('}', '\}')
string = string.replace('*', '\*')
string = string.replace('+', '\+')
string = string.replace('?', '\?')
string = string.replace('^', '\^')
string = string.replace('$', '\$')
return string
def restringTostring(string):
string = string.replace('\\\\', '\\')
string = string.replace('\.', '.')
string = string.replace('\(', '(')
string = string.replace('\)', ')')
string = string.replace('\[', '[')
string = string.replace('\]', ']')
string = string.replace('\{', '{')
string = string.replace('\}', '}')
string = string.replace('\*', '*')
string = string.replace('\+', '+')
string = string.replace('\?', '?')
string = string.replace('\^', '^')
string = string.replace('\$', '$')
return string
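# Editor's illustrative sketch (strings are hypothetical): stringTorestring escapes
# regex metacharacters so a literal Objective-C string can be embedded in a pattern,
# and restringTostring undoes it:
#   stringTorestring('确定(OK)?')    -> '确定\(OK\)\?'
#   restringTostring('确定\(OK\)\?') -> '确定(OK)?'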
if __name__ == '__main__':
print('shit')
print(restringTostring("'hahg\asdlfj.skdjfl/8/'"))
#
# if __name__ == '__main__':
# xlxs2ios.getLocalizedStringFile()
# global isneedwrite
# isneedwrite = 0
# global tempfilelines
# tempfilelines = []
# print(cur_file_dir())
# cur_Path = cur_file_dir()
# for root, dirs, files in os.walk(cur_Path):
# if root.find("Pods/") < 0 and root.find("Libs/") < 0:
# for file in files:
# if file.endswith(".m"):
# mfile = os.path.join(root, file)
# if os.path.isfile(mfile):
# with open(mfile, 'r', encoding='utf-8') as f:
# tempfilelines = []
# for line in f.readlines():
# tempfilelines.append(line)
# for string in strPattern.findall(line):
#
# if zhPattern.search(string):
# print(line)
# print('{}\n'.format(string))
# try:
# newstring = stringTorestring(string)
# # newstring = string
# dealwithChinese(newstring, line)
# except:
# print("Unexpected error:", sys.exc_info()[0])
# dealwithChinese(string, line)
# # newstring = stringTorestring(string)
# # dealwithChinese(newstring, line)
# else:
# print('{}\n'.format(string))
# try:
# newstring = stringTorestring(string)
# # newstring = string
# dealwithEnglish(newstring, line)
# except:
# print("Unexpected error:", sys.exc_info()[0])
# dealwithEnglish(string, line)
# # newstring = stringTorestring(string)
# # dealwithEnglish(newstring, line)
#
# if isneedwrite == 1:
# with open(mfile, 'w', encoding='utf-8') as f:
# f.writelines(tempfilelines)
| 39.00565
| 131
| 0.467265
|
9ad616e1f75520ff2ca236dca5a0a8792fd18c75
| 1,385
|
py
|
Python
|
part01-e18_acronyms/test/test_acronyms.py
|
alekshiidenhovi/Helsinki-University-Data-Analysis-with-Python
|
bc27fa585d22d630a38312ee7c4b2173d5b80d12
|
[
"MIT"
] | null | null | null |
part01-e18_acronyms/test/test_acronyms.py
|
alekshiidenhovi/Helsinki-University-Data-Analysis-with-Python
|
bc27fa585d22d630a38312ee7c4b2173d5b80d12
|
[
"MIT"
] | null | null | null |
part01-e18_acronyms/test/test_acronyms.py
|
alekshiidenhovi/Helsinki-University-Data-Analysis-with-Python
|
bc27fa585d22d630a38312ee7c4b2173d5b80d12
|
[
"MIT"
] | 2
|
2022-02-14T20:07:29.000Z
|
2022-03-11T07:30:23.000Z
|
#!/usr/bin/env python3
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load, get_stdout
module_name="src.acronyms"
acronyms = load(module_name, "acronyms")
@points('p01-18.1')
class Acronyms(unittest.TestCase):
def test_first(self):
result = acronyms("""For the purposes of the EU General Data Protection Regulation (GDPR), the controller of your personal information is International Business Machines Corporation (IBM Corp.), 1 New Orchard Road, Armonk, New York, United States, unless indicated otherwise. Where IBM Corp. or a subsidiary it controls (not established in the European Economic Area (EEA)) is required to appoint a legal representative in the EEA, the representative for all such cases is IBM United Kingdom Limited, PO Box 41, North Harbour, Portsmouth, Hampshire, United Kingdom PO6 3AU.""")
self.assertIsInstance(result, list, f"acronyms should return a list. Got {type(result)}")
self.assertEqual(result, ['EU', 'GDPR', 'IBM', 'IBM', 'EEA', 'EEA', 'IBM', 'PO', 'PO6', '3AU'])
def test_empty(self):
result = acronyms("")
self.assertIsInstance(result, list, f"acronyms should return a list. Got {type(result)}")
self.assertEqual(result, [], msg="Empty list expected for empty input string!")
if __name__ == '__main__':
unittest.main()
| 46.166667
| 585
| 0.712635
|
b729f835260f114c010289b9cdd35faa4bfdad7f
| 47,391
|
py
|
Python
|
python-trunk/sfapi2/sfutil/report/sfReportCR.py
|
raychorn/svn_molten-magma
|
8aa2ff2340707eecae6514943e86f5afba9cd54a
|
[
"CC0-1.0"
] | null | null | null |
python-trunk/sfapi2/sfutil/report/sfReportCR.py
|
raychorn/svn_molten-magma
|
8aa2ff2340707eecae6514943e86f5afba9cd54a
|
[
"CC0-1.0"
] | null | null | null |
python-trunk/sfapi2/sfutil/report/sfReportCR.py
|
raychorn/svn_molten-magma
|
8aa2ff2340707eecae6514943e86f5afba9cd54a
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python2.3
"""
A class for instantiation by sfNotify to generate a type of report
Not intended to stand alone.
"""
#version = 0.10 # Initial version under development
#version = 1.10 # factor common elements out for use by other reports
version = 2.0 # 10/25/2004 Moved to new API and Task Branches
from sfReportBase import ReportBase
from reportSection import ReportSection
from sfUtil import *
from sfConstant import *
import time
import cStringIO
import re
import pprint
import copy
import textwrap
import types
class CRReport(ReportBase):
def __init__(self, sfTool, userId, contact, sectionList=[]):
ReportBase.__init__(self, sfTool, userId, contact)
self.sectionList = sectionList
self.crList = []
self.modList = []
self.crSectionCore = None
## END __init__
def generateReport(self):
"""
Central method to build CR report
Calls other methods to build chunks of the report.
"""
print "CR REPORT"
date = time.strftime("%A %B %d, %Y",
time.localtime(self.sfTool.reportTime))
subject = "CR Report for %s %s on %s" %(self.contact.get('FirstName'),
self.contact.get('LastName'),
date)
self.subject = subject
# Assemble the sections, adding them to the body
section = 'TestFixes'
if section in self.sectionList:
# My CRs (MODs, really) With Fixes Available
crWithFixSec = CRsWithFixesSection(self.sfTool, self.userId,
self.contact)
crWithFixTxt = crWithFixSec.getSection()
self.body.write(crWithFixTxt)
# note if we have content for the section or not.
self.sectionContentMap[section] = crWithFixSec.hasContent()
section = 'Unassigned'
if section in self.sectionList:
crSecCore = self.prepCrSectionData()
crUnasgSec = UnassignedCrSection(self.sfTool, self.userId,
self.contact, crSecCore)
crUnasgTxt = crUnasgSec.getSection()
self.body.write(crUnasgTxt)
# note if we have content for the section or not.
self.sectionContentMap[section] = crUnasgSec.hasContent()
section = 'RecentClose'
if section in self.sectionList:
crClosedSec = ClosedCrSection(self.sfTool, self.userId,
self.contact)
crClosedTxt = crClosedSec.getSection()
self.body.write(crClosedTxt)
# note if we have content for the section or not.
self.sectionContentMap[section] = crClosedSec.hasContent()
section = 'SCM'
if section in self.sectionList:
crSecCore = self.prepCrSectionData()
crScmSec = ScmCrSection(self.sfTool, self.userId,
self.contact, crSecCore)
crScmTxt = crScmSec.getSection()
self.body.write(crScmTxt)
# note if we have content for the section or not.
self.sectionContentMap[section] = crScmSec.hasContent()
section = 'Team'
if section in self.sectionList:
crSecCore = self.prepCrSectionData()
crTeamSec = TeamCrSection(self.sfTool, self.userId,
self.contact, crSecCore)
crTeamTxt = crTeamSec.getSection()
self.body.write(crTeamTxt)
# note if we have content for the section or not.
self.sectionContentMap[section] = crTeamSec.hasContent()
section = 'PE'
if section in self.sectionList:
crSecCore = self.prepCrSectionData()
crAppPeSec = AppPeCrSection(self.sfTool, self.userId,
self.contact, crSecCore)
crAppPeTxt = crAppPeSec.getSection()
self.body.write(crAppPeTxt)
# note if we have content for the section or not.
self.sectionContentMap[section] = crAppPeSec.hasContent()
section = 'EngMgr'
if section in self.sectionList:
crSecCore = self.prepCrSectionData()
crAppMgrSec = AppMgrCrSection(self.sfTool, self.userId,
self.contact, crSecCore)
crAppMgrTxt = crAppMgrSec.getSection()
self.body.write(crAppMgrTxt)
# note if we have content for the section or not.
self.sectionContentMap[section] = crAppMgrSec.hasContent()
section = 'Dev'
if section in self.sectionList:
crSecCore = self.prepCrSectionData()
crDevSec = DevCrSection(self.sfTool, self.userId,
self.contact, crSecCore)
crDevTxt = crDevSec.getSection()
self.body.write(crDevTxt)
# note if we have content for the section or not.
self.sectionContentMap[section] = crDevSec.hasContent()
section = 'Untouched'
if section in self.sectionList:
crSecCore = self.prepCrSectionData()
crUntouchSec = UntouchedCrSection(self.sfTool, self.userId,
self.contact, crSecCore)
crUntouchTxt = crUntouchSec.getSection()
self.body.write(crUntouchTxt)
# note if we have content for the section or not.
self.sectionContentMap[section] = crUntouchSec.hasContent()
self.body.write(self.generateReportFooter())
## END generateReport(self)
def prepCrSectionData(self):
if self.crSectionCore is None:
self.crSectionCore = CrSectionCore(self.sfTool, self.userId,
self.contact)
return self.crSectionCore
## END prepCrSectionData
## END class CRReport
class ReportQueryMixin:
""" Provides a collection of queries used to build related reports so that
such code may be shared.
"""
seriesSize = 15
def fetchOriginatorTestingBAs(self):
"""
Fetch CR Originator BAs which have the status Merged - Testing
by Originator and the Approve field is not marked 'Approve'
This query will need to change to find BAs which are of role
CR Originator, Status = Merged - Testing by Originator and
Approve != Approve.
        returns a list of Task Branch Ids and a map of BAs keyed by '<task branch id>:<CR number>'
"""
baList = []
f1 = ['Approval_Role__c','=','CR Originator']
f2 = ['Status__c','=','Merged - Testing by Originator']
f3 = ['Approve__c','!=','Approve']
f4 = ['Approve__c','!=','Reject']
f5 = ['OwnerId','=',self.userId]
where = [f1, 'and', f2, 'and', f3, 'and', f4, 'and', f5]
print "QUERY ........ %s" %where
baList = self.sfTool.query(BRANCH_APPROVAL_OBJ, where, sc='all')
if baList in BAD_INFO_LIST:
baList = []
pass
baMap = {}
tbIdList = []
baByTbCrMap = {}
for ba in baList:
tbId = ba.get('Task_Branch__c')
aeBaCrNum = ba.get('CR_List__c','').lstrip('0')
print "Task Branch Id %s .... and CR num %s ..." %(tbId,aeBaCrNum)
if tbId is None:
continue
if tbId not in tbIdList:
tbIdList.append(tbId)
key = '%s:%s' %(tbId, aeBaCrNum)
if baByTbCrMap.has_key(key):
msg = "Already saw an AE BA on task branch %s for CR num %s" \
%(tbId, aeBaCrNum)
self.sfTool.setLog(msg, 'warn')
baByTbCrMap[key] = ba
if baMap.has_key(tbId):
baMap[tbId].append(ba)
else:
baMap[tbId] = [ba]
pass
continue
#return baMap
return tbIdList, baByTbCrMap
    ## END fetchOriginatorTestingBAs
def retrieveTaskBranches(self, taskBranchIdList):
"""Performs a retrieve of the supplied task branch list
"""
tbFieldList = ('Id','Branch_Status__c','Code_Stream__c',
'Merged_Date_Time__c')
tbList = self.sfTool.retrieve(taskBranchIdList, TASK_BRANCH_OBJ,
fieldList=tbFieldList)
if tbList in BAD_INFO_LIST:
tbList = []
pass
tbMap = {}
for tb in tbList:
tbMap[tb.get('Id')] = tb
continue
return tbMap
## END retrieveTaskBranches
def fetchBranchCRLinks(self, taskBranchIdList):
"""Performs a series of queries to find BranchCR links on supplied
TaskBranchIds.
Returns two maps relating branch IDs to CR IDs and vice versa
"""
myTbIdList = copy.deepcopy(taskBranchIdList)
# divvy ID list into subgroups of no more than seriesSize
brCrList = []
while len(myTbIdList) > 0:
series = myTbIdList[:self.seriesSize]
myTbIdList = myTbIdList[self.seriesSize:]
# Build where list for the series and query the links, appending
# results to a list
where = []
for tbId in series:
where.append(['Task_Branch__c','=',tbId])
where.append('or')
continue
# delete the straggling 'or'
if len(where) > 1:
where.pop()
pass
# run the query with the where clause
res = self.sfTool.query(BRANCH_CR_LINK_OBJ, where,
sc='brcrl')
if res not in BAD_INFO_LIST and \
type(res) in [types.ListType, types.TupleType]:
brCrList.extend(res)
pass
continue
return self.processFetchedBranchCrLinks(brCrList)
## END fetchBranchCRLinks
def fetchCrBranchLinks(self, crIdList):
brCrMap = {}
crBrMap = {}
# divvy ID list into subgroups of no more than seriesSize
brCrList = []
while len(crIdList) > 0:
series = crIdList[:self.seriesSize]
crIdList = crIdList[self.seriesSize:]
where = []
for crId in series:
#crId = cr['Id']
where.append(['Case__c','=',crId])
where.append('or')
continue
# delete the straggling 'or'
where.pop()
# run the query with the where clause
res = self.sfTool.query(BRANCH_CR_LINK_OBJ, where,
sc='brcrl')
if res not in BAD_INFO_LIST and \
type(res) in [types.ListType, types.TupleType]:
brCrList.extend(res)
pass
continue
return self.processFetchedBranchCrLinks(brCrList)
## END fetchCRBranchLinks
def processFetchedBranchCrLinks(self, brCrList):
"""
        Common post-processing: turn fetched Branch/CR link records into the two lookup maps.
"""
# slice list into two maps: BranchId -> CrId and CrId -> BranchId
brCrMap = {}
crBrMap = {}
for brCrLink in brCrList:
caseId = brCrLink.get('Case__c',None)
tbId = brCrLink.get('Task_Branch__c',None)
if caseId is not None and tbId is not None:
# add to the branch -> cr map
if not brCrMap.has_key(tbId):
brCrMap[tbId] = []
if caseId not in brCrMap[tbId]:
brCrMap[tbId].append(caseId)
# add to the cr -> branch map
if not crBrMap.has_key(caseId):
crBrMap[caseId] = []
if tbId not in crBrMap[caseId]:
crBrMap[caseId].append(tbId)
continue
return brCrMap, crBrMap
## END processFetchedBranchCrLinks
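    # Editor's illustrative sketch (IDs are hypothetical): given link records
    #   [{'Case__c': 'CR1', 'Task_Branch__c': 'TB1'},
    #    {'Case__c': 'CR2', 'Task_Branch__c': 'TB1'}]
    # processFetchedBranchCrLinks returns
    #   brCrMap = {'TB1': ['CR1', 'CR2']}  and  crBrMap = {'CR1': ['TB1'], 'CR2': ['TB1']}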
def retrieveCRs(self, crIdList):
"""Performs a retrieve of the supplied CR Id list
"""
crFieldList = ('Id','OwnerId','CaseNumber','Subject','Status',
'Priority','ExpeditedPriority__c')
crList = self.sfTool.retrieve(crIdList, CASE_OBJ,
fieldList=crFieldList)
if crList in BAD_INFO_LIST:
crList = []
pass
crMap = {}
for cr in crList:
crMap[cr.get('Id')] = cr
continue
return crMap
## END retrieveCRs
## END class ReportQueryMixin
class CrSectionCore(ReportSection, ReportQueryMixin):
def __init__(self, sfTool, userId, contact):
ReportSection.__init__(self, sfTool)
self.userId = userId
self.contact = contact
self.crMap = None
self.tbMap = None
self.recentClosedCrList = None
self.scmCrList = []
self.MgrAppCrList = []
self.PEAppCrList = []
self.TeamCrList = []
self.devCrList = []
self.untouchedCrList = []
self.unassignedCrList = []
self.brCrMap = {}
self.crBrMap = {}
# This will cause the data to be initialized
self.classifyCrs()
self.classifyTaskBranches()
return
## END __init__
def fetchOpenCreatedCrs(self):
""" Query for all open CRs that our user has created.
"""
if self.crMap is None:
crList = []
crMap = {}
f1 = ['CreatedById','=',self.userId]
f2 = ['IsClosed','=',False]
f3a = ['RecordTypeId','=',RECTYPE_CR]
f3b = ['RecordTypeId','=',RECTYPE_PVCR]
f3c = ['RecordTypeId','=',RECTYPE_PLDCR]
where = [f1,'and',f2,'and','(',f3a,'or',f3b,'or',f3c,')']
fields = ('Id','CaseNumber','Subject','Priority', 'OwnerId',
'ExpeditedPriority__c', 'Status', 'CreatedById')
res = self.sfTool.query(CASE_OBJ, where, fields)
if res not in BAD_INFO_LIST:
crList = res
pass
for cr in crList:
crMap[cr['Id']] = cr
self.crMap = crMap
pass
return self.crMap
## END fetchOpenCreatedCrs
def fetchBranchesOnCrs(self, crIdList):
if self.tbMap is None:
self.tbMap = []
# Fetch branch CR links on cases
self.brCrMap, self.crBrMap = self.fetchCrBranchLinks(crIdList)
# retrieve task branches by ID
self.tbMap = self.retrieveTaskBranches(self.brCrMap.keys())
pass
return self.tbMap
## END fetchBranchesOnCrs
def classifyCrs(self):
crMap = self.fetchOpenCreatedCrs()
crList = crMap.values()
crList.sort(sortCRsByPriority)
scmStatusRE = re.compile(r'(^|-)SCM-')
approveStatusRE = re.compile(r'Approv')
scmCrList = []
approveCrList = []
devCrList = []
untouchedCrList = [] # CR that's been assigned but is still open
unassignedCrList = []
unclassCrList = [] # CRs that we don't classify and report on (yet)
for cr in crList:
status = cr['Status']
if status == 'Open':
if cr['CreatedById'] == cr['OwnerId']:
# CR hasn't been assigned by the creator
unassignedCrList.append(cr)
else:
# CR has been assigned, but work hasn't yet started on it
untouchedCrList.append(cr)
pass
pass
elif status == 'Fixing':
# CR is in development
devCrList.append(cr)
elif scmStatusRE.search(status):
# CR is in an SCM state
scmCrList.append(cr)
elif approveStatusRE.search(status):
# CR is approving or has been approved
approveCrList.append(cr)
else:
unclassCrList.append(cr)
pass
continue
self.scmCrList = scmCrList
self.approveCrList = approveCrList
self.devCrList = devCrList
self.untouchedCrList = untouchedCrList
self.unassignedCrList = unassignedCrList
self.unclassCrList = unclassCrList
return
## END classifyCrs
def classifyTaskBranches(self):
crMap = self.fetchOpenCreatedCrs()
tbMap = self.fetchBranchesOnCrs(crMap.keys())
fixingStates = ['Fixing', 'Rejected by Mgr', 'Rejected by PE']
mgrAppStates = ['Approving by Manager']
peAppStates = ['Approving by PE']
teamStates = ['Approved, pending Team Branch']
scmStates = ['Submitted to SCM','SCM-Submitted','SCM-Received','SCM-Need Branch','SCM-QOR Building','SCM-QOR Testing','SCM-QOR Results','SCM-Hold','post_release','SCM-Post-Release','SCM-Ready to Bundle','SCM-Bundle Building','SCM-Bundle Testing','SCM-Bundle Results','SCM-Approved','SCM-Patch-Build-Delayed','SCM-Patch-Building','SCM-Patch-Build Testing','SCM-Patch-Build Results','SCM-Red-Building','SCM-Red-Build Results','SCM-Candidate-Building','SCM-Candidate-Build Testing','SCM-Candidate-Build Results','SCM-Patch-Build Today','SCM-Ready-for-Patch-Build','SCM-Red-Build Today']
testingStates = ['Merged']
devTbList = [] # Branches in Fixing state
mgrAppTbList = [] # Branches in Mgr Approving
peAppTbList = [] # Branches in PE Approving
teamTbList = [] # Branches in Team phase
scmTbList = [] # Branches in SCM
testTbList = [] # Branches in testing
unclassTbList = [] # all other Branches
for tbId, tb in tbMap.items():
status = tb.get('Branch_Status__c','')
if status in fixingStates:
devTbList.append(tb)
elif status in mgrAppStates:
mgrAppTbList.append(tb)
elif status in peAppStates:
peAppTbList.append(tb)
elif status in teamStates:
teamTbList.append(tb)
elif status in scmStates:
scmTbList.append(tb)
elif status in testingStates:
testTbList.append(tb)
else:
unclassTbList.append(tb)
pass
continue
self.devTbList = devTbList
self.mgrAppTbList = mgrAppTbList
self.peAppTbList = peAppTbList
self.teamTbList = teamTbList
self.scmTbList = scmTbList
self.testTbList = testTbList
self.unclassTbList = unclassTbList
return
## END def classifyBranches
## END class CrSectionCore
class CrTbSectionBase(ReportSection):
""" Common bits for CR sections that rely on Task Branch info
"""
seriesSize = 15
def __init__(self, sfTool, userId, contact, core, tbList):
"""
sfTool - sfTool instance
userId - sfdc user ID of the person we're generating the report for
contact - contact record of the person we're writing report for
core - CrSectionCore instance
"""
ReportSection.__init__(self, sfTool)
self.userId = userId
self.contact = contact
# core is the CrSectionCore instance with the user's CR data in it
self.tbList = tbList
self.core = core
return
## END __init__
def buildSecBody(self):
body = cStringIO.StringIO()
if len(self.tbList):
for tb in self.tbList:
tbId = tb['Id']
crIdList = self.core.brCrMap.get(tbId, [])
for crId in crIdList:
cr = self.core.crMap[crId]
tbUrl = "%s/%s" %(self.baseUrl, tbId)
tbLine = "%08d (%s in %s)" \
%(int(cr['CaseNumber']),
self.sfTool.getCrPriority(cr),
tb.get('Code_Stream__c','No Code Stream'))
tbLine2 = "%s" %textwrap.fill(cr.get('Subject', 'No CR Subject'))
outStr = "%s\n%s\n\tTask Branch: %s\n\n" %(tbLine, tbLine2, tbUrl)
body.write(outStr.encode('ascii','replace'))
self.hasContentFlag = True
return body.getvalue()
## END buildSecBody
## END class CrTbSectionBase
class CrSectionBase(ReportSection):
"""
    Common bits for CR sections that don't rely on Task Branch info
"""
def __init__(self, sfTool, userId, contact, core, crList):
"""
sfTool - sfTool instance
userId - sfdc user ID of the person we're generating the report for
contact - contact record of the person we're writing report for
core - CrSectionCore instance
"""
ReportSection.__init__(self, sfTool)
self.userId = userId
self.contact = contact
# core is the CrSectionCore instance with the user's CR data in it
self.crList = crList
self.core = core
return
## END __init__
def buildSecBody(self):
body = cStringIO.StringIO()
if len(self.crList):
for cr in self.crList:
crUrl = "%s/%s" %(self.baseUrl, cr['Id'])
crLine = "%08d (%s)\n%s" \
%(int(cr['CaseNumber']), self.sfTool.getCrPriority(cr),
textwrap.fill(cr.get('Subject', 'No Subject')))
outStr = "%s\n\t%s\n\n" %(crLine, crUrl)
body.write(outStr.encode('ascii','replace'))
continue
self.hasContentFlag = True
pass
return body.getvalue()
## END buildSecBody
## END class CrSectionBase
class CRsWithFixesSection(ReportSection, ReportQueryMixin):
# First section to convert.....
def __init__(self, sfTool, userId, contact):
ReportSection.__init__(self, sfTool)
self.header = "My CRs With Fixes Available"
self.userId = userId
self.contact = contact
## END __init__
def buildSecBody(self):
body = cStringIO.StringIO()
dataList = self.fetchSectionDataFlow()
if len(dataList):
for dataTuple in dataList:
crData = dataTuple[0]
tbData = dataTuple[1]
baData = dataTuple[2]
#baUrl = "Edit Branch Approval: %s/%s/e" \
baUrl = "Branch Approval: %s/%s" \
%(self.baseUrl, baData.get('Id',''))
tbUrl = "Task Branch: %s/%s" \
%(self.baseUrl, tbData['Id'])
tbLine1 = "%08d (%s in %s)" \
%(int(crData.get('CaseNumber','').lstrip('0')),
self.sfTool.getCrPriority(crData),
tbData.get('Code_Stream__c',''))
tbLine2 = "%s" %textwrap.fill(crData.get('Subject', 'No CR Subject'))
outStr = "%s\n%s\n\t%s\n\t%s\n\n" %(tbLine1, tbLine2, baUrl, tbUrl)
body.write(outStr.encode('ascii','replace'))
# set the section footer so that it gets written
self.footer = "(Note: Please test these CRs and mark the Branch Approvals as appropriate)"
self.hasContentFlag = True
else:
body.write("No CRs are available for you to test at this time.\n")
return body.getvalue()
## END buildBody(self)
def fetchSectionDataFlow(self):
""" Contains the flow for assembling this section's data
"""
tbMap = {}
brCrMap = {}
crBrMap = {}
crMap = {}
tbIdList, baByTbCrMap = self.fetchOriginatorTestingBAs()
if len(tbIdList):
tbMap = self.retrieveTaskBranches(tbIdList)
brCrMap, crBrMap = self.fetchBranchCRLinks(tbIdList)
if len(crBrMap):
crMap = self.retrieveCRs(crBrMap.keys())
pass
# now, correlate the data:
crList = [] #[ ({crData}, {tbData}, {baData})]
# crawl found TBs
for tbId, tbData in tbMap.items():
crIdList = brCrMap.get(tbId, [])
for crId in crIdList:
crData = crMap.get(crId, None)
if crData is None:
continue
crNum = crData.get('CaseNumber','').lstrip('0')
baKey = '%s:%s' %(tbId, crNum)
baData = baByTbCrMap.get(baKey, None)
if crData is not None and baData is not None:
crList.append((crData, tbData, baData))
pass
continue
continue
crList.sort(sortBaCrsByPriority)
return crList
## END fetchSectionDataFlow
## END class CRsWithFixesSection(ReportSection)
class ScmCrSection(CrTbSectionBase):
"""
Report section on CRs that are in SCM
"""
def __init__(self, sfTool, userId, contact, core):
"""
sfTool - SFMagmaTool (or subclass) instance
userId - sfdc user ID of the person we're generating the report for
contact - contact record of the person we're writing report for
core - CrSectionCore instance
"""
CrTbSectionBase.__init__(self, sfTool, userId, contact,
core, core.scmTbList)
self.header = "My CRs in SCM"
return
## END __init__
def buildSecBody(self):
# Override parent method, but still call it.
body = CrTbSectionBase.buildSecBody(self)
if self.hasContent():
# set self.footer here to add a footer to the section
pass
else:
body = "You have no CRs in SCM at the time of this report.\n"
return body
## END buildSecBody
## END class ScmCrSection
class TeamCrSection(CrTbSectionBase):
"""
Report section on CRs that are in the Team phase
"""
def __init__(self, sfTool, userId, contact, core):
"""
sfTool - SFMagmaTool (or subclass) instance
userId - sfdc user ID of the person we're generating the report for
contact - contact record of the person we're writing report for
core - CrSectionCore instance
"""
CrTbSectionBase.__init__(self, sfTool, userId, contact,
core, core.teamTbList)
self.header = "My CRs in the Team Phase"
return
## END __init__
def buildSecBody(self):
# Override parent method, but still call it.
body = CrTbSectionBase.buildSecBody(self)
if self.hasContent():
# set self.footer here to add a footer to the section
pass
else:
body = "You have no CRs in the Team Phase at the time of this report.\n"
return body
## END buildSecBody
## END class TeamCrSection
class AppPeCrSection(CrTbSectionBase):
"""
Report section on CRs that have branches up for approval by PE
"""
def __init__(self, sfTool, userId, contact, core):
"""
sfTool - SFMagmaTool (or subclass) instance
userId - sfdc user ID of the person we're generating the report for
contact - contact record of the person we're writing report for
core - CrSectionCore instance
"""
CrTbSectionBase.__init__(self, sfTool, userId, contact,
core, core.peAppTbList)
self.header = "My CRs in Branches Awaiting PE Approval"
# select the PE BAs on the TbList which are "Approving"
self.approverMap = self.fetchPEApprovers(core.peAppTbList)
return
## END __init__
def fetchPEApprovers(self, tbList):
""" Get Approving managers on listed task branches for role
"""
f1 = ['Status__c','=','Approving']
f2 = ['Approval_Role__c','=','Product Engineer']
fields = ('Id','Task_Branch__c','CR_List__c','OwnerId')
approverMap = {}
uidList = []
myTbList = copy.deepcopy(tbList)
while len(myTbList) > 0:
series = myTbList[:self.seriesSize]
myTbList = myTbList[self.seriesSize:]
where = [f1,'and',f2,'and','(']
for tb in series:
tbId = tb['Id']
where.append(['Task_Branch__c','=',tbId])
where.append('or')
continue
where.pop()
where += [')']
# run the query with the where clause
res = self.sfTool.query('Branch_Approval__c', where,
sc=fields)
if res not in BAD_INFO_LIST:
for ba in res:
crListStr = ba.get('CR_List__c')
crList = crListStr.split(',')
for crNum in crList:
if crNum == '': continue
crNum = crNum.lstrip('0')
key = "%s-%s" %(ba['Task_Branch__c'], crNum)
approverMap[key] = ba
continue
if ba['OwnerId'] not in uidList:
uidList.append(ba['OwnerId'])
pass
continue
pass
continue
# retrieve the list of owner IDs
userNameMap = {}
fields = ('Id', 'FirstName', 'LastName')
res = self.sfTool.retrieve(uidList, 'User', fieldList=fields)
if res not in BAD_INFO_LIST:
for user in res:
userName = "%s %s" %(user.get('FirstName',''), user.get('LastName'))
userNameMap[user['Id']] = userName.lstrip()
# now match the names up with the approver IDs
for key, ba in approverMap.items():
approverId = ba['OwnerId']
ba['OwnerName'] = userNameMap.get(approverId,'Name Not Found')
approverMap[key] = ba
return approverMap
## END fetchPEApprovers
def buildSecBody(self):
body = cStringIO.StringIO()
if len(self.tbList):
for tb in self.tbList:
tbId = tb['Id']
crIdList = self.core.brCrMap.get(tbId, [])
for crId in crIdList:
cr = self.core.crMap[crId]
crNum = cr.get('CaseNumber').lstrip('0')
key = "%s-%s" %(tbId, crNum)
approverInfo = self.approverMap.get(key,{})
approverName = approverInfo.get('OwnerName','')
tbUrl = "%s/%s" %(self.baseUrl, tbId)
if len(approverName):
tbLine = "%08d (%s in %s by %s)" \
%(int(cr['CaseNumber']),
self.sfTool.getCrPriority(cr),
tb.get('Code_Stream__c','No Code Stream'),
approverName)
else:
tbLine = "%08d (%s in %s)" \
%(int(cr['CaseNumber']),
self.sfTool.getCrPriority(cr),
tb.get('Code_Stream__c','No Code Stream'))
tbLine2 = "%s" %textwrap.fill(cr.get('Subject', 'No CR Subject'))
tbLine2 = tbLine2.strip()
outStr = "%s\n" %tbLine
outStr += "%s\n" %tbLine2
outStr += "\tTask Branch: %s\n\n" %tbUrl
body.write(outStr.encode('ascii','replace'))
self.hasContentFlag = True
if self.hasContent():
# set self.footer here to add a footer to the section
# set the section footer so that it gets written
self.footer = "(Note: Each of the CRs listed above may have already been\n"
self.footer += "approved in the linked task branch, however the task branch\n"
self.footer += "may still be waiting for approval by other PEs on other CRs.)"
else:
body.write("You have no CRs in branches awaiting PE approval at the time of this report.\n")
pass
return body.getvalue()
    ## END buildSecBody
## END class AppPeCrSection
class AppMgrCrSection(CrTbSectionBase):
"""
Report section on CRs that have branches up for approval by Eng Mgr.
"""
def __init__(self, sfTool, userId, contact, core):
"""
sfTool - SFMagmaTool (or subclass) instance
userId - sfdc user ID of the person we're generating the report for
contact - contact record of the person we're writing report for
core - CrSectionCore instance
"""
CrTbSectionBase.__init__(self, sfTool, userId, contact,
core, core.mgrAppTbList)
self.header = "My CRs in Branches Awaiting Engineering Manager Approval"
# select the manager BAs on the TbList which are "Approving"
self.approverMap = self.fetchMgrApprovers(core.mgrAppTbList)
return
## END __init__
def fetchMgrApprovers(self, tbList):
""" Get Approving managers on listed task branches for role
"""
f1 = ['Status__c','=','Approving']
f2 = ['Approval_Role__c','=','Engineering Manager']
fields = ('Id','Task_Branch__c','OwnerId')
approverMap = {}
uidList = []
myTbList = copy.deepcopy(tbList)
while len(myTbList) > 0:
series = myTbList[:self.seriesSize]
myTbList = myTbList[self.seriesSize:]
where = [f1,'and',f2,'and','(']
for tb in series:
tbId = tb['Id']
where.append(['Task_Branch__c','=',tbId])
where.append('or')
continue
where.pop()
where += [')']
# run the query with the where clause
res = self.sfTool.query('Branch_Approval__c', where,
sc=fields)
if res not in BAD_INFO_LIST:
for ba in res:
approverMap[ba['Task_Branch__c']] = ba
if ba['OwnerId'] not in uidList:
uidList.append(ba['OwnerId'])
continue
pass
continue
# retrieve the list of owner IDs
userNameMap = {}
fields = ('Id', 'FirstName', 'LastName')
res = self.sfTool.retrieve(uidList, 'User', fieldList=fields)
if res not in BAD_INFO_LIST:
for user in res:
userName = "%s %s" %(user.get('FirstName',''), user.get('LastName'))
userNameMap[user['Id']] = userName.lstrip()
# now match the names up with the approver IDs
for tbId, ba in approverMap.items():
approverId = ba['OwnerId']
ba['OwnerName'] = userNameMap.get(approverId,'Name Not Found')
approverMap[tbId] = ba
return approverMap
## END fetchMgrBAs
def buildSecBody(self):
body = cStringIO.StringIO()
if len(self.tbList):
for tb in self.tbList:
tbId = tb['Id']
crIdList = self.core.brCrMap.get(tbId, [])
for crId in crIdList:
cr = self.core.crMap[crId]
approverInfo = self.approverMap.get(tbId,{})
approverName = approverInfo.get('OwnerName','')
tbUrl = "%s/%s" %(self.baseUrl, tbId)
if len(approverName):
tbLine = "%08d (%s in %s by %s)" \
%(int(cr['CaseNumber']),
self.sfTool.getCrPriority(cr),
tb.get('Code_Stream__c','No Code Stream'),
approverName)
else:
tbLine = "%08d (%s in %s)" \
%(int(cr['CaseNumber']),
self.sfTool.getCrPriority(cr),
tb.get('Code_Stream__c','No Code Stream'))
tbLine2 = "%s" %textwrap.fill(cr.get('Subject', 'No CR Subject'))
tbLine2 = tbLine2.strip()
outStr = "%s\n%s\n\tTask Branch: %s\n\n" %(tbLine, tbLine2, tbUrl)
body.write(outStr.encode('ascii','replace'))
self.hasContentFlag = True
if self.hasContent():
# set self.footer here to add a footer to the section
pass
else:
body.write("You have no CRs in branches awaiting mgr. approval at the time of this report.\n")
return body.getvalue()
## END buildSecBody
## END class AppMgrCrSection
class DevCrSection(CrTbSectionBase):
"""
Report section on CRs that are in development
"""
def __init__(self, sfTool, userId, contact, core):
"""
sfTool - SFMagmaTool (or subclass) instance
userId - sfdc user ID of the person we're generating the report for
contact - contact record of the person we're writing report for
core - CrSectionCore instance
"""
CrTbSectionBase.__init__(self, sfTool, userId, contact,
core, core.devTbList)
self.header = "My CRs in Development"
return
## END __init__
def buildSecBody(self):
# Override parent method, but still call it.
body = CrTbSectionBase.buildSecBody(self)
if self.hasContent():
# set self.footer here to add a footer to the section
pass
else:
body = "You have no CRs in development at the time of this report.\n"
return body
## END buildSecBody
## END class DevCrSection
class UnassignedCrSection(CrSectionBase):
"""
Report section on CRs that are owned by creator and status is Open
"""
def __init__(self, sfTool, userId, contact, core):
"""
        sfTool - SFMagmaTool (or subclass) instance
userId - sfdc user ID of the person we're generating the report for
contact - contact record of the person we're writing report for
core - CrSectionCore instance
"""
CrSectionBase.__init__(self, sfTool, userId, contact,
core, core.unassignedCrList)
self.header = "My CRs That I Haven't Assigned Yet"
return
## END __init__
def buildSecBody(self):
# Override parent method, but still call it.
body = CrSectionBase.buildSecBody(self)
if self.hasContent():
self.footer = "You may need to assign the CRs in this section to a User or a Queue. See this solution for details: %s/501300000000DiE" %self.baseUrl
else:
body = "You have no CRs that you may need to assign at the time of this report.\n"
return body
## END buildSecBody
## END class UnassignedCrSection
class UntouchedCrSection(CrSectionBase):
"""
Report section on CRs that have been assigned and status is Open
"""
def __init__(self, sfTool, userId, contact, core):
"""
        sfTool - SFMagmaTool (or subclass) instance
userId - sfdc user ID of the person we're generating the report for
contact - contact record of the person we're writing report for
core - CrSectionCore instance
"""
CrSectionBase.__init__(self, sfTool, userId, contact,
core, core.untouchedCrList)
self.header = "My CRs Pending Development"
return
## END __init__
def buildSecBody(self):
# Override parent method, but still call it.
body = CrSectionBase.buildSecBody(self)
if self.hasContent():
# set self.footer here to add a footer to the section
pass
else:
body = "You have no CRs pending development at the time of this report.\n"
return body
## END buildSecBody
## END class UntouchedCrSection
class ClosedCrSection(ReportSection):
def __init__(self, sfTool, userId, contact):
ReportSection.__init__(self, sfTool)
self.header = "My CRs That Were Closed Since the Last Report"
self.userId = userId
self.contact = contact
self.recentClosedCrList = []
return
## END __init__
def buildSecBody(self):
body = cStringIO.StringIO()
self.fetchRecentClosedCrs()
if len(self.recentClosedCrList):
for cr in self.recentClosedCrList:
if cr['closeInfo']['CreatedById'] == self.userId:
closerName = "you"
else:
closerName = "%s %s" %(cr['closeUser'].get('FirstName', ''),
cr['closeUser'].get('LastName'))
closerName = closerName.strip() # in case no fname
crUrl = "%s/%s" %(self.baseUrl, cr['Id'])
crLine1 = "%08d (%s) %s" \
%(int(cr['CaseNumber']),
self.sfTool.getCrPriority(cr),
cr.get('Subject', 'No CR Subject'))
crLine2 = "has been closed by %s" %closerName
outStr = "%s\n\t%s\n\t%s\n\n" %(crUrl, crLine1, crLine2)
body.write(outStr.encode('ascii','replace'))
self.hasContentFlag = True
else:
body.write("No CRs you created have been closed since the last time your report was run.\n")
return body.getvalue()
## END buildSecBody
def fetchRecentClosedCrs(self):
"""
Fetch user's CRs which have been closed since the last report run date
"""
f1a = ['RecordTypeId', '=', RECTYPE_CR]
f1b = ['RecordTypeId', '=', RECTYPE_PVCR]
f1c = ['RecordTypeId', '=', RECTYPE_PLDCR]
f2 = ['CreatedById', '=', self.userId]
f3 = ['IsClosed', '=', True]
# This is all operated on in UTC
(discard, discard, lastReportDate) = self.sfTool.parseLastReportDate(self.contact)
f4 = ['LastModifiedDate', '>', lastReportDate]
where = ['(',f1a,'or',f1b,'or',f1c,')','and',f2,'and',f3,'and',f4]
fields = ()
crResult = self.sfTool.query(CASE_OBJ, where, sc='all')
crList = []
if crResult not in BAD_INFO_LIST:
crList = crResult
crList.sort(sortCRsByPriority)
# annotate each CR with who closed it.
# Also throw out false positives (CR which is closed, and
# last modified is later than Last Report, but latest
# closed history item shows CR was closed earlier
newCrList = []
for cr in crList:
cr = self.getCrClosureInfo(cr)
if cr['closeInfo'] is None:
continue
else:
newCrList.append(cr)
self.recentClosedCrList = newCrList
return self.recentClosedCrList
## END getRecentClosedCrs
def getCrClosureInfo(self, cr):
"""
Look in case history to see who closed the CR
We only care about most recent closure.
If no records are found, that means that while the CR may have
been modified in the time period we're looking at, it was closed
prior.
CR map is populated with a closeInfo key and returned.
"""
(discard, discard, lastReportDate) = self.sfTool.parseLastReportDate(self.contact)
f1 = ['CaseId', '=', cr['Id']]
f2 = ['Field', '=', 'Status']
f3 = ['NewValue', 'like', 'Closed%']
f4 = ['CreatedDate', '>', lastReportDate]
where = [f1, 'and', f2, 'and', f3, 'and', f4]
chResult = self.sfTool.query(CASE_HISTORY_OBJ, where, sc='all')
if chResult in BAD_INFO_LIST:
cr['closeInfo'] = None
cr['closeUser'] = None
elif len(chResult) >= 1:
chResult.sort(lambda a, b: cmp(a['CreatedDate'], b['CreatedDate']))
cr['closeInfo'] = chResult[-1]
            # Also, grab the user record of who closed it.
cr['closeUser'] = self.lookupUserByIdCache(cr['closeInfo']['CreatedById'])
return cr
## END getCrClosureInfo
## END class ClosedCrSection
def sortBaCrsByPriority(a, b):
"""
    Sort two (CR, Task Branch, Branch Approval) tuples by the priority of their CR.
"""
aCr = a[0]
bCr = b[0]
return sortCRsByPriority(aCr, bCr)
## END sortBaCrsByPriority(a, b)
def sortCRsByPriority(a, b):
"""
Compare two CRs by their priority fields
"""
# If priority isn't set, default it to 3 - Medium for sort
# If expedite priority isn't set, default it to 2 - No for sort
aPriority = a.get('Priority', '3')[:1]
aExpPri = a.get('ExpeditedPriority__c', '2')[:1]
bPriority = b.get('Priority', '3')[:1]
bExpPri = b.get('ExpeditedPriority__c', '2')[:1]
aPriVal = "%s%s" %(aPriority, aExpPri)
bPriVal = "%s%s" %(bPriority, bExpPri)
return cmp(aPriVal,bPriVal)
## END sortCRsByPriority
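# Editor's illustrative sketch (field value strings are hypothetical): the sort key
# is the first character of Priority followed by the first character of
# ExpeditedPriority__c, compared ascending, so
#   {'Priority': '1 - Critical', 'ExpeditedPriority__c': '1 - Yes'}   # key '11'
# sorts before
#   {'Priority': '1 - Critical'}                                      # key '12' (defaults)
# which sorts before
#   {'Priority': '3 - Medium', 'ExpeditedPriority__c': '2 - No'}      # key '32'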
def sortEntityListByCreatedDate(a, b):
"""
As the name says...
The Created Date is an ISO-8601 DateTime which is sortable as a string.
"""
aDate = a['createdDate']
bDate = b['createdDate']
return cmp(aDate, bDate)
## END sortEntityListByCreatedDate
| 35.902273
| 592
| 0.527674
|
6b1ff94f639875836cf6c30084d2b32831bcabbd
| 9,455
|
py
|
Python
|
map.py
|
sumitsk/algp
|
dfe7654013ebdcbc1d4624dd59aa56f3e773f4e9
|
[
"MIT"
] | null | null | null |
map.py
|
sumitsk/algp
|
dfe7654013ebdcbc1d4624dd59aa56f3e773f4e9
|
[
"MIT"
] | null | null | null |
map.py
|
sumitsk/algp
|
dfe7654013ebdcbc1d4624dd59aa56f3e773f4e9
|
[
"MIT"
] | 1
|
2019-03-01T00:01:47.000Z
|
2019-03-01T00:01:47.000Z
|
import numpy as np
from utils import manhattan_distance
from graph_utils import get_heading, opposite_headings
import ipdb
class Map(object):
def __init__(self, num_gp_rows=15, num_gp_cols=37, num_row_passes=2, row_pass_width=1):
super(Map, self).__init__()
self.num_gp_rows = num_gp_rows
self.num_gp_cols = num_gp_cols
self.num_row_passes = num_row_passes
self.row_pass_width = row_pass_width
assert self.num_gp_rows % (self.num_row_passes + 1) == 0, 'Infeasible row setting'
self._shape = self._compute_map_dimensions()
self.corridor_len = self.num_gp_rows // (self.num_row_passes + 1)
self.row_pass_indices = self._get_row_pass_indices()
self.free_cols = np.arange(0, self.shape[1], 2)
self.obstacle_cols = np.arange(1, self.shape[1], 2)
# 1 if obstacle 0 otherwise
self.occupied = self._set_occupancy_grid()
@property
def shape(self):
return self._shape
def _set_occupancy_grid(self):
# returns the occupancy grid of the map
grid = np.full(self._shape, False)
grid[:, self.obstacle_cols] = True
grid[self.row_pass_indices, :] = False
return grid
def _compute_map_dimensions(self):
# extra row at top and bottom
total_rows = self.num_gp_rows + (self.num_row_passes + 2) * self.row_pass_width
total_cols = self.num_gp_cols * 2 - 1
return total_rows, total_cols
def _get_row_pass_indices(self):
# return indices of all the row pass
ind = []
t = 0
last = 0
while t < self.num_row_passes + 2:
ind += list(range(last, last + self.row_pass_width))
t += 1
last = last + self.row_pass_width + self.corridor_len
return np.array(ind)
def distance_between_nodes(self, start, goal, heading):
# return distance between start and goal and final heading on reaching goal
if start == goal:
return 0, heading
# these cases should never occur
if start[0] not in self.row_pass_indices and heading not in [(1,0),(-1,0)]:
raise NotImplementedError('Starting location has infeasible heading')
if goal[0] in self.row_pass_indices:
raise NotImplementedError('Goal location is a junction')
# if start and goal are in the same column
if start[1] == goal[1]:
final_heading = get_heading(start, goal)
# if headings align, move to the goal directly
if not opposite_headings(heading, final_heading):
return manhattan_distance(start, goal), final_heading
# if not, move to the junction, then move to the adjacent column (and come back later) and proceed to the goal
else:
sj = self.get_junction(start, heading)
gj = self.get_junction(goal, heading)
# start and goal are in different blocks
if sj!=gj:
total_dist = manhattan_distance(start, sj) + 2*2 + manhattan_distance(sj, goal)
return total_dist, (-heading[0], 0)
# start and goal are in the same block, need to come back in this block
else:
node = self.get_junction(goal, (-heading[0], 0))
total_dist = manhattan_distance(start, sj) + 2*2 + manhattan_distance(sj, node) + manhattan_distance(node, goal)
return total_dist, heading
# start and goal are in different columns
else:
# move to the junction and then proceed to the goal
if heading in [(1,0), (-1,0)]:
node = self.get_junction(start, heading)
total_dist = manhattan_distance(start, node) + manhattan_distance(node, goal)
# shift to the goal column to compute final heading
final_heading = get_heading((node[0], goal[1]), goal)
final_heading = heading if final_heading is None else final_heading
return total_dist, final_heading
# start location is a junction and heading is either east or west
else:
# if heading points towards the goal direction, just move there
if (goal[1] >= start[1] and heading[1] > 0) or (goal[1] <= start[1] and heading[1] < 0):
total_dist = manhattan_distance(start, goal)
# shift to the goal column to compute final heading
final_heading = get_heading((start[0], goal[1]), goal)
return total_dist, final_heading
# if heading points in the opposite direction of goal
else:
up_node = self.get_up_junction(goal)
down_node = self.get_down_junction(goal)
# go to down node if up node lies in the same row as start
if start[0] == up_node[0]:
total_dist = manhattan_distance(start, down_node) + manhattan_distance(down_node, goal)
final_heading = (-1,0)
# go to up node if down node lies in the same row as start
elif start[0] == down_node[0]:
total_dist = manhattan_distance(start, up_node) + manhattan_distance(up_node, goal)
final_heading = (1,0)
else:
total_dist = manhattan_distance(start, goal)
final_heading = get_heading((start[0], goal[1]), goal)
return total_dist, final_heading
def distance_between_nodes_with_headings(self, start, start_heading, goal, goal_heading):
dist, final_heading = self.distance_between_nodes(start, goal, start_heading)
if not opposite_headings(final_heading, goal_heading):
return dist
# Goal heading is opposite of final_heading
perimeter = 4 + 2*(self.corridor_len+1)
if start_heading in [(1,0),(-1,0)]:
start_junc = self.get_junction(start, start_heading)
gj = self.get_junction(goal, start_heading)
goal_junc = self.get_junction(goal, (-goal_heading[0], goal_heading[1]))
            # the agent has to move at least this distance
min_dist = manhattan_distance(start, start_junc) + manhattan_distance(start_junc, goal_junc) + manhattan_distance(goal_junc, goal)
# if start and goal are in the same column
if start[1]==goal[1]:
# same block
if start_junc[0]==gj[0]:
# top or bottom block
if start_junc[0]==0 or start_junc[0]==self.shape[0]-1:
extra = perimeter + 4
else:
extra = perimeter
# different blocks
else:
extra = 4
# start and goal are in adjacent sampling columns
elif abs(start[1]-goal[1])==2:
if start_heading == goal_heading:
extra = 4
else:
extra = 0
else:
extra = 0
else:
raise NotImplementedError
return min_dist + extra
def get_junction(self, pose, heading):
# return junction in the heading direction
if heading == (1,0):
return self.get_down_junction(pose)
elif heading == (-1,0):
return self.get_up_junction(pose)
else:
return pose
def get_up_junction(self, pose):
# return up junction (in decreasing x direction)
up = max([x for x in self.row_pass_indices if x<=pose[0]])
return (up, pose[1])
def get_down_junction(self, pose):
# return down junction (in increasing x direction)
down = min([x for x in self.row_pass_indices if x>=pose[0]])
return (down, pose[1])
def nearest_waypoint_path_cost(self, start, start_heading, waypoints, return_seq=False):
# return cost of the path formed by always moving to the nearest waypoint
nw = len(waypoints)
visited = [False]*nw
total_cost = 0
node = start
heading = start_heading
if return_seq:
seq = []
costs = []
# final_headings = []
while sum(visited) != nw:
# find the nearest waypoint from the current node
all_dists = [np.inf]*nw
all_final_headings = [(0,0)]*nw
for i in range(nw):
if visited[i]:
continue
dist, final_heading = self.distance_between_nodes(node, waypoints[i], heading)
all_dists[i] = dist
all_final_headings[i] = final_heading
idx = np.argmin(all_dists)
total_cost += all_dists[idx]
node = waypoints[idx]
heading = all_final_headings[idx]
visited[idx] = True
if return_seq:
costs.append(all_dists[idx])
seq.append(idx)
# final_headings.append(heading)
if return_seq:
# return costs, seq, final_headings
return costs, seq
return total_cost
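# --- Editor's usage sketch (not part of the original module) -----------------
# With the default constructor arguments the grid is 19 x 73 cells, with
# horizontal passes at rows 0, 6, 12 and 18, sampling columns at even indices
# and obstacle columns at odd indices. Coordinates below are illustrative and
# assume get_heading returns the unit direction from start to goal:
#   m = Map()
#   m.shape              # (19, 73)
#   m.row_pass_indices   # array([ 0,  6, 12, 18])
#   d, h = m.distance_between_nodes((1, 0), (4, 0), (1, 0))
#   # same column, heading already points toward the goal, so d == 3, h == (1, 0)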
| 41.469298
| 142
| 0.571126
|
356a654b316e26d9ef53a353863f472c54347a6b
| 4,621
|
py
|
Python
|
plugins/modules/panos_op.py
|
Nothing4You/pan-os-ansible
|
50078a71eef0c5e9a4263020135df6a0e05961bf
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/panos_op.py
|
Nothing4You/pan-os-ansible
|
50078a71eef0c5e9a4263020135df6a0e05961bf
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/panos_op.py
|
Nothing4You/pan-os-ansible
|
50078a71eef0c5e9a4263020135df6a0e05961bf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2017 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: panos_op
short_description: execute arbitrary OP commands on PANW devices (e.g. show interface all)
description:
- This module will allow user to pass and execute any supported OP command on the PANW device.
author:
- Ivan Bojer (@ivanbojer)
- Garfield Lee Freeman (@shinmog)
version_added: '1.0.0'
requirements:
- pan-python can be obtained from PyPI U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
- xmltodict
notes:
- Checkmode is NOT supported.
- Panorama is supported.
extends_documentation_fragment:
- paloaltonetworks.panos.fragments.transitional_provider
- paloaltonetworks.panos.fragments.vsys
options:
cmd:
description:
- The OP command to be performed.
type: str
required: true
cmd_is_xml:
description:
- The cmd is already given in XML format, so don't convert it.
type: bool
default: false
vsys:
description:
- The vsys target where the OP command will be performed.
type: str
default: "vsys1"
'''
EXAMPLES = '''
- name: show list of all interfaces
panos_op:
provider: '{{ provider }}'
cmd: 'show interfaces all'
- name: show system info
panos_op:
provider: '{{ provider }}'
cmd: 'show system info'
- name: show system info as XML command
panos_op:
provider: '{{ provider }}'
cmd: '<show><system><info/></system></show>'
cmd_is_xml: true
'''
RETURN = '''
stdout:
description: output of the given OP command as JSON formatted string
returned: success
type: str
sample: "{system: {app-release-date: 2017/05/01 15:09:12}}"
stdout_xml:
description: output of the given OP command as an XML formatted string
returned: success
type: str
sample: "<response status=success><result><system><hostname>fw2</hostname>"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.paloaltonetworks.panos.plugins.module_utils.panos import get_connection
try:
from panos.errors import PanDeviceError
except ImportError:
try:
from pandevice.errors import PanDeviceError
except ImportError:
pass
try:
import xmltodict
import json
HAS_LIB = True
except ImportError:
HAS_LIB = False
def main():
helper = get_connection(
vsys=True,
with_classic_provider_spec=True,
argument_spec=dict(
cmd=dict(required=True),
cmd_is_xml=dict(default=False, type='bool'),
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=False,
required_one_of=helper.required_one_of,
)
if not HAS_LIB:
module.fail_json(msg='Missing required libraries.')
parent = helper.get_pandevice_parent(module)
cmd = module.params['cmd']
cmd_is_xml = module.params['cmd_is_xml']
changed = True
safecmd = ['diff', 'show']
xml_output = ''
try:
xml_output = parent.op(cmd, xml=True, cmd_xml=(not cmd_is_xml))
except PanDeviceError as e1:
if cmd_is_xml:
module.fail_json(msg='Failed to run XML command : {0} : {1}'.format(cmd, e1))
tokens = cmd.split()
tokens[-1] = '"{0}"'.format(tokens[-1])
cmd2 = ' '.join(tokens)
try:
xml_output = parent.op(cmd2, xml=True)
except PanDeviceError as e2:
module.fail_json(msg='Failed to run command : {0} : {1}'.format(cmd2, e2))
if tokens[0] in safecmd:
changed = False
obj_dict = xmltodict.parse(xml_output)
json_output = json.dumps(obj_dict)
module.exit_json(changed=changed, msg="Done",
stdout=json_output, stdout_xml=xml_output)
if __name__ == '__main__':
main()
| 28.176829
| 98
| 0.664358
|
8936db7add2b23cf311ab1b0d512951998770534
| 7,969
|
py
|
Python
|
dynamic_preferences/registries.py
|
dadoeyad/django-dynamic-preferences
|
fa012e3012f7149a92bd2daa57efd4c5e0aba32c
|
[
"BSD-3-Clause"
] | null | null | null |
dynamic_preferences/registries.py
|
dadoeyad/django-dynamic-preferences
|
fa012e3012f7149a92bd2daa57efd4c5e0aba32c
|
[
"BSD-3-Clause"
] | null | null | null |
dynamic_preferences/registries.py
|
dadoeyad/django-dynamic-preferences
|
fa012e3012f7149a92bd2daa57efd4c5e0aba32c
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db.models.fields import FieldDoesNotExist
from django.apps import apps
# import the logging library
import warnings
import logging
import collections
import persisting_theory
# Get an instance of a logger
logger = logging.getLogger(__name__)
#: The package where autodiscover will try to find preferences to register
from .managers import PreferencesManager
from .settings import preferences_settings
from .exceptions import NotFoundInRegistry
from .types import StringPreference
from .preferences import EMPTY_SECTION, Section
class MissingPreference(StringPreference):
"""
Used as a fallback when the preference object is not found in registries
This can happen for example when you delete a preference in the code,
but don't remove the corresponding entries in database
"""
pass
class PreferenceModelsRegistry(persisting_theory.Registry):
"""Store relationships beetween preferences model and preferences registry"""
look_into = preferences_settings.REGISTRY_MODULE
def register(self, preference_model, preference_registry):
self[preference_model] = preference_registry
preference_registry.preference_model = preference_model
if not hasattr(preference_model, 'registry'):
setattr(preference_model, 'registry', preference_registry)
self.attach_manager(preference_model, preference_registry)
def attach_manager(self, model, registry):
if not hasattr(model, 'instance'):
return
def instance_getter(self):
return registry.manager(instance=self)
getter = property(instance_getter)
instance_class = model._meta.get_field('instance').remote_field.model
setattr(instance_class, preferences_settings.MANAGER_ATTRIBUTE, getter)
def get_by_preference(self, preference):
return self[
preference._meta.proxy_for_model if preference._meta.proxy
else preference.__class__
]
def get_by_instance(self, instance):
"""Return a preference registry using a model instance"""
        # we iterate through registered preference models in order to get the instance class
        # and check if instance is an instance of this class
for model, registry in self.items():
try:
instance_class = model._meta.get_field('instance').remote_field.model
if isinstance(instance, instance_class):
return registry
except FieldDoesNotExist: # global preferences
pass
return None
preference_models = PreferenceModelsRegistry()
class PreferenceRegistry(persisting_theory.Registry):
"""
Registries are special dictionaries that are used by dynamic-preferences to register and access your preferences.
dynamic-preferences has one registry per Preference type:
- :py:const:`user_preferences`
- :py:const:`site_preferences`
- :py:const:`global_preferences`
In order to register preferences automatically, you must call :py:func:`autodiscover` in your URLconf.
"""
look_into = preferences_settings.REGISTRY_MODULE
#: a name to identify the registry
name = "preferences_registry"
preference_model = None
#: used to reverse urls for sections in form views/templates
section_url_namespace = None
def __init__(self, *args, **kwargs):
super(PreferenceRegistry, self).__init__(*args, **kwargs)
self.section_objects = collections.OrderedDict()
def register(self, preference_class):
"""
Store the given preference class in the registry.
:param preference_class: a :py:class:`prefs.Preference` subclass
"""
preference = preference_class(registry=self)
self.section_objects[preference.section.name] = preference.section
try:
self[preference.section.name][preference.name] = preference
except KeyError:
self[preference.section.name] = collections.OrderedDict()
self[preference.section.name][preference.name] = preference
return preference_class
def _fallback(self, section_name, pref_name):
"""
Create a fallback preference object,
This is used when you have model instances that do not match
any registered preferences, see #41
"""
message = (
'Creating a fallback preference with ' +
'section "{}" and name "{}".' +
'This means you have preferences in your database that ' +
'don\'t match any registered preference. ' +
'If you want to delete these entries, please refer to the ' +
'documentation: https://django-dynamic-preferences.readthedocs.io/en/latest/lifecycle.html') # NOQA
warnings.warn(message.format(section_name, pref_name))
class Fallback(MissingPreference):
section = Section(name=section_name) if section_name else None
name = pref_name
default = ''
help_text = 'Obsolete: missing in registry'
return Fallback()
def get(self, name, section=None, fallback=False):
"""
Returns a previously registered preference
:param section: The section name under which the preference is registered
:type section: str.
:param name: The name of the preference. You can use dotted notation 'section.name' if you want to avoid providing section param
:type name: str.
:param fallback: Should we return a dummy preference object instead of raising an error if no preference is found?
:type name: bool.
:return: a :py:class:`prefs.BasePreference` instance
"""
# try dotted notation
try:
_section, name = name.split(
preferences_settings.SECTION_KEY_SEPARATOR)
return self[_section][name]
except ValueError:
pass
# use standard params
try:
return self[section][name]
except KeyError:
if fallback:
return self._fallback(section_name=section, pref_name=name)
raise NotFoundInRegistry("No such preference in {0} with section={1} and name={2}".format(
self.__class__.__name__, section, name))
def get_by_name(self, name):
"""Get a preference by name only (no section)"""
for section in self.values():
for preference in section.values():
if preference.name == name:
return preference
raise NotFoundInRegistry("No such preference in {0} with name={1}".format(
self.__class__.__name__, name))
def manager(self, **kwargs):
"""Return a preference manager that can be used to retrieve preference values"""
return PreferencesManager(registry=self, model=self.preference_model, **kwargs)
def sections(self):
"""
:return: a list of apps with registered preferences
:rtype: list
"""
return self.keys()
def preferences(self, section=None):
"""
Return a list of all registered preferences
or a list of preferences registered for a given section
:param section: The section name under which the preference is registered
:type section: str.
:return: a list of :py:class:`prefs.BasePreference` instances
"""
if section is None:
return [self[section][name] for section in self for name in self[section]]
else:
return [self[section][name] for name in self[section]]
class PerInstancePreferenceRegistry(PreferenceRegistry):
pass
class GlobalPreferenceRegistry(PreferenceRegistry):
section_url_namespace = 'dynamic_preferences:global.section'
def populate(self, **kwargs):
return self.models(**kwargs)
global_preferences_registry = GlobalPreferenceRegistry()
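# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). In a real
# project this registration lives in an app's `dynamic_preferences_registry`
# module; the section and preference names below ("general",
# "maintenance_mode") are hypothetical.
#
#     from dynamic_preferences.types import BooleanPreference
#     from dynamic_preferences.preferences import Section
#
#     general = Section("general")
#
#     @global_preferences_registry.register
#     class MaintenanceMode(BooleanPreference):
#         section = general
#         name = "maintenance_mode"
#         default = False
#
#     # Values are then read and written through a manager, using the
#     # "section<SECTION_KEY_SEPARATOR>name" key convention:
#     manager = global_preferences_registry.manager()
#     manager["general__maintenance_mode"] = True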
| 35.417778
| 136
| 0.672732
|
35625937f426c1039b9033c2d3f3ba68c5e7714a
| 4,997
|
py
|
Python
|
s_vlan_to_csv.py
|
zhangineer/securecrt-tools
|
13e980d3fd3881adc06effd27ef29e4821fa9a26
|
[
"Apache-2.0"
] | 168
|
2018-01-05T19:17:17.000Z
|
2022-03-31T06:16:59.000Z
|
s_vlan_to_csv.py
|
zhangineer/securecrt-tools
|
13e980d3fd3881adc06effd27ef29e4821fa9a26
|
[
"Apache-2.0"
] | 19
|
2018-01-11T02:26:44.000Z
|
2022-03-31T15:57:10.000Z
|
s_vlan_to_csv.py
|
zhangineer/securecrt-tools
|
13e980d3fd3881adc06effd27ef29e4821fa9a26
|
[
"Apache-2.0"
] | 61
|
2017-12-21T14:15:30.000Z
|
2022-03-10T01:39:24.000Z
|
# $language = "python"
# $interface = "1.0"
import os
import sys
import logging
# Add script directory to the PYTHONPATH so we can import our modules (only if run from SecureCRT)
if 'crt' in globals():
script_dir, script_name = os.path.split(crt.ScriptFullName)
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
else:
script_dir, script_name = os.path.split(os.path.realpath(__file__))
# Now we can import our custom modules
from securecrt_tools import scripts
from securecrt_tools import utilities
# Create global logger so we can write debug messages from any function (if debug mode setting is enabled in settings).
logger = logging.getLogger("securecrt")
logger.debug("Starting execution of {0}".format(script_name))
# ################################################ SCRIPT LOGIC ###################################################
def script_main(session):
"""
| SINGLE device script
| Author: Jamie Caesar
| Email: jcaesar@presidio.com
This script will output the VLAN database to a CSV file.
    One possible use of this script is to take the .CSV outputs from 2 or more devices, paste them
into a single XLS file and use Excel to highlight duplicate values, so VLAN overlaps can be
discovered prior to connecting switches together via direct link, OTV, etc. This could also be used
to find missing VLANs between 2 large tables that should have the same VLANs.
:param session: A subclass of the sessions.Session object that represents this particular script session (either
SecureCRTSession or DirectSession)
:type session: sessions.Session
"""
# Get script object that owns this session, so we can check settings, get textfsm templates, etc
script = session.script
# Start session with device, i.e. modify term parameters for better interaction (assuming already connected)
session.start_cisco_session()
# Validate device is running a supported OS
session.validate_os(["IOS", "NXOS"])
if session.os == "IOS":
template_file = script.get_template("cisco_ios_show_vlan.template")
else:
template_file = script.get_template("cisco_nxos_show_vlan.template")
raw_vlan = session.get_command_output("show vlan brief")
fsm_results = utilities.textfsm_parse_to_list(raw_vlan, template_file, add_header=True)
normalize_port_list(fsm_results)
output_filename = session.create_output_filename("vlan", ext=".csv")
utilities.list_of_lists_to_csv(fsm_results, output_filename)
# Return terminal parameters back to the original state.
session.end_cisco_session()
def normalize_port_list(vlan_data):
"""
When TextFSM processes a VLAN with a long list of ports, each line will be a separate item in the resulting list.
    This function combines all of those entries into a single string that contains all of the ports in a comma-separated
list.
:param vlan_data: The VLAN data from TextFSM that will be modified in-place
"""
# VLANs with multiple lines of Ports will have multiple list entries. Combine all into a single string of ports.
# Skip first (header) row
for entry in vlan_data[1:]:
port_list = entry[3]
if len(port_list) > 0:
port_string = ""
for line in port_list:
# Empty list entries contain a single entry. Skip them.
if line == " ":
continue
# If port_string is still empty, add our line to this string.
if port_string == "":
port_string = port_string + line
# If there is something in port-string, concatenate strings with a ", " in between.
else:
port_string = "{0}, {1}".format(port_string, line)
entry[3] = port_string
else:
entry[3] = ""
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, use the SecureCRT specific class
if __name__ == "__builtin__":
# Initialize script object
crt_script = scripts.CRTScript(crt)
# Get session object for the SecureCRT tab that the script was launched from.
crt_session = crt_script.get_main_session()
# Run script's main logic against our session
try:
script_main(crt_session)
except Exception:
crt_session.end_cisco_session()
raise
# Shutdown logging after
logging.shutdown()
# If the script is being run directly, use the simulation class
elif __name__ == "__main__":
# Initialize script object
direct_script = scripts.DebugScript(os.path.realpath(__file__))
# Get a simulated session object to pass into the script.
sim_session = direct_script.get_main_session()
# Run script's main logic against our session
script_main(sim_session)
# Shutdown logging after
logging.shutdown()
| 39.039063
| 119
| 0.668201
|
27ee4b473cec400d4659591487170d37f3a2f800
| 80
|
py
|
Python
|
profanity/config.py
|
TrustedMercury/filter-profanity
|
7c38dbba19e341ad72068338952ff07dc8037e37
|
[
"MIT"
] | 8
|
2020-08-25T01:33:29.000Z
|
2021-02-21T12:01:03.000Z
|
profanity/config.py
|
TrustedMercury/filter-profanity
|
7c38dbba19e341ad72068338952ff07dc8037e37
|
[
"MIT"
] | 2
|
2020-10-20T13:05:05.000Z
|
2020-10-21T00:19:32.000Z
|
profanity/config.py
|
TrustedMercury/filter-profanity
|
7c38dbba19e341ad72068338952ff07dc8037e37
|
[
"MIT"
] | 3
|
2020-10-20T12:10:15.000Z
|
2020-12-05T00:36:10.000Z
|
from pathlib import Path
PROFANE_WORD_LIST_PATH = Path("data/profanity.json")
| 16
| 52
| 0.8
|
17abda85776f7a4584183fb81beb3167f5260ab9
| 499
|
py
|
Python
|
service2/app/grpc_channels.py
|
sneawo/aiohttp-grpcio-example
|
2d7842f08816cbbd93ba394c374085176edce712
|
[
"MIT"
] | null | null | null |
service2/app/grpc_channels.py
|
sneawo/aiohttp-grpcio-example
|
2d7842f08816cbbd93ba394c374085176edce712
|
[
"MIT"
] | null | null | null |
service2/app/grpc_channels.py
|
sneawo/aiohttp-grpcio-example
|
2d7842f08816cbbd93ba394c374085176edce712
|
[
"MIT"
] | null | null | null |
import grpc
from typing import AsyncGenerator
import aiohttp
from .grpc.interceptors import RequestIdInterceptor
from .grpc import service1_pb2_grpc
async def grpc_channels_ctx(app: aiohttp.web.Application) -> AsyncGenerator:
grpc_channel_service1 = grpc.aio.insecure_channel(
app["config"].SERVICE1_CHANNEL, interceptors=(RequestIdInterceptor(),)
)
app["service1_stub"] = service1_pb2_grpc.Service1Stub(grpc_channel_service1)
yield
await grpc_channel_service1.close()
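# Illustrative wiring (a sketch; the app factory below is hypothetical and
# lives elsewhere in the service). aiohttp runs the code before `yield` on
# startup and the code after `yield` on cleanup, so the channel is closed
# when the application shuts down.
#
#     def create_app(config) -> aiohttp.web.Application:
#         app = aiohttp.web.Application()
#         app["config"] = config
#         app.cleanup_ctx.append(grpc_channels_ctx)
#         return app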
| 33.266667
| 80
| 0.793587
|
8effa7714b2b43176ea36e42d7166583b87953ba
| 1,583
|
py
|
Python
|
loader/input_loader.py
|
Plux1/imagelab
|
dd657522c8328e454b214612833650d8ece46e94
|
[
"MIT"
] | null | null | null |
loader/input_loader.py
|
Plux1/imagelab
|
dd657522c8328e454b214612833650d8ece46e94
|
[
"MIT"
] | null | null | null |
loader/input_loader.py
|
Plux1/imagelab
|
dd657522c8328e454b214612833650d8ece46e94
|
[
"MIT"
] | null | null | null |
from sklearn.utils import shuffle
from imutils import paths
import progressbar
import numpy as np
import cv2
# collect image paths and extract labels
def load_path_and_labels(images_dir):
path_list = shuffle(list(paths.list_images(images_dir)))
labels = []
# loop over the paths to extract labels
for path in path_list:
label = path.split('/')[-1].split()[0]
labels.append(label)
# convert labels into a numpy array
labels = np.array(labels)
return path_list, labels
# DataLoader class
class DataLoader:
    # constructor
def __init__(self, preprocessors=None):
self.preprocessors = preprocessors
def load_data(self, path_list):
data = []
load_widget = ['Load Data: ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()]
progress = progressbar.ProgressBar(maxval=len(path_list), widgets=load_widget).start()
# loop over the path list to read and process image data
for (i, path) in enumerate(path_list):
image = cv2.imread(path)
# check to see if preprocessors list is provided
if self.preprocessors is not None:
# loop over the preprocessor
for p in self.preprocessors:
image = p.preprocess(image)
data.append(image)
# update progress bar
progress.update(i)
        # convert to array and rescale pixel values to [0, 1]
data = np.array(data).astype('float32') / 255.0
progress.finish()
return data
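# Illustrative usage (the directory and preprocessor are hypothetical; any
# object with a `preprocess(image)` method works):
#
#     path_list, labels = load_path_and_labels("datasets/animals")
#     loader = DataLoader(preprocessors=[SimpleResizer(32, 32)])
#     data = loader.load_data(path_list)  # float32 array scaled to [0, 1]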
| 28.781818
| 111
| 0.631712
|
dd33e3e35769c74a2cd9efe9711f93379eac0f62
| 225
|
py
|
Python
|
dtc/enums/order_type_enum.py
|
jseparovic/python-ws-dtc-client
|
fd3952cdaf7ab8c9d5a26ccf53b5e9acb3a9ea0f
|
[
"Apache-2.0"
] | 15
|
2020-04-26T05:25:53.000Z
|
2022-02-11T19:38:42.000Z
|
dtc/enums/order_type_enum.py
|
jseparovic/python-ws-dtc-client
|
fd3952cdaf7ab8c9d5a26ccf53b5e9acb3a9ea0f
|
[
"Apache-2.0"
] | 2
|
2021-01-08T19:58:08.000Z
|
2021-11-29T06:08:48.000Z
|
dtc/enums/order_type_enum.py
|
jseparovic/python-ws-dtc-client
|
fd3952cdaf7ab8c9d5a26ccf53b5e9acb3a9ea0f
|
[
"Apache-2.0"
] | 4
|
2020-11-23T13:38:01.000Z
|
2021-12-27T13:21:06.000Z
|
class OrderTypeEnum:
ORDER_TYPE_UNSET = 0
ORDER_TYPE_MARKET = 1
ORDER_TYPE_LIMIT = 2
ORDER_TYPE_STOP = 3
ORDER_TYPE_STOP_LIMIT = 4
ORDER_TYPE_MARKET_IF_TOUCHED = 5
ORDER_TYPE_LIMIT_IF_TOUCHED = 6
| 22.5
| 36
| 0.733333
|
745daec1080cae9e5c12b61d1daac2a582d2329f
| 1,570
|
py
|
Python
|
Tools.py
|
Quiltic/DragonTurtle
|
ababc499d4bad6accb250ec3cb9649df1c4a3fc9
|
[
"MIT"
] | null | null | null |
Tools.py
|
Quiltic/DragonTurtle
|
ababc499d4bad6accb250ec3cb9649df1c4a3fc9
|
[
"MIT"
] | null | null | null |
Tools.py
|
Quiltic/DragonTurtle
|
ababc499d4bad6accb250ec3cb9649df1c4a3fc9
|
[
"MIT"
] | null | null | null |
# this is for the basic tools for turtle, the ones that i use all the time but don't need cluttering the main file
if __name__ == "__main__":
    # the gunk that is needed for this file. Mostly here so i don't have to get error messages for this file
import discord
from discord.ext import commands
bot = commands.Bot(";")
bertle = 275002179763306517
#all of the needed imports
import asyncio, subprocess
import time, os, sys, requests, math
from datetime import datetime
from random import randrange # actual needed import
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
###################### Basic Commands ###################
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# this is to send complex messages and it's from old turtle
async def sendmsg(ctx, msg = 'test', embed = None):
if embed:
await ctx.send(embed=embed)
else:
await ctx.send(msg)
async def sendmsgdirect(ctx, msg = 'embd', embed = None):
if embed:
await ctx.author.send(embed=embed)
else:
await ctx.author.send(msg)
# this is for old commands that don't work without it, called from the message event
async def sendmsgorig(message,msg):
await message.channel.send(msg)
# this is for old commands that don't work without it, called from the message event
async def sendmsgorigdirect(message,msg):
await message.author.send(msg)
# random number, I frankly just wanted this, don't remember why
def randomnum(low,high):
return randrange(low,high+1)
| 30.784314
| 119
| 0.623567
|
f246033c9ad769fd7efdd1ba6e4dfa153a00c632
| 525
|
py
|
Python
|
scripts/aby_tests/zokrates_test_aby.py
|
elefthei/circ
|
12fd024f3cbcc403d4a9d51063563562b78a4c1a
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
scripts/aby_tests/zokrates_test_aby.py
|
elefthei/circ
|
12fd024f3cbcc403d4a9d51063563562b78a4c1a
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
scripts/aby_tests/zokrates_test_aby.py
|
elefthei/circ
|
12fd024f3cbcc403d4a9d51063563562b78a4c1a
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
#!/usr/bin/env python
from util import run_tests
from test_suite import *
if __name__ == "__main__":
tests = arithmetic_tests + \
arithmetic_boolean_tests + \
nary_arithmetic_tests + \
bitwise_tests + \
boolean_tests + \
nary_boolean_tests + \
const_arith_tests + \
const_bool_tests + \
loop_tests + \
ite_tests + \
function_tests + \
misc_tests
# shift_tests + \
# arr_tests + \
run_tests('zok', tests)
| 21
| 36
| 0.569524
|
60b8ebb6c3fcc5770d02e79e53be70e4713d20f1
| 169
|
py
|
Python
|
Lesson04/Code/MySquare1.py
|
pangmi/learntocode
|
719a2bfbc897104d0f95dcf4634fe93427e2c397
|
[
"MIT"
] | null | null | null |
Lesson04/Code/MySquare1.py
|
pangmi/learntocode
|
719a2bfbc897104d0f95dcf4634fe93427e2c397
|
[
"MIT"
] | null | null | null |
Lesson04/Code/MySquare1.py
|
pangmi/learntocode
|
719a2bfbc897104d0f95dcf4634fe93427e2c397
|
[
"MIT"
] | 1
|
2021-12-19T18:01:06.000Z
|
2021-12-19T18:01:06.000Z
|
import turtle
turtle.bgcolor('gray')
t = turtle.Pen()
t.pencolor('blue')
t.forward(200)
t.left(90)
t.forward(200)
t.left(90)
t.forward(200)
t.left(90)
t.forward(200)
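# The same square can be drawn with a loop instead of repeating the two
# commands four times (illustrative alternative, not part of the lesson file):
#
#     for _ in range(4):
#         t.forward(200)
#         t.left(90)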
| 11.266667
| 22
| 0.692308
|
e9bfdfa1dbba0bf10ea945ebfda60ed165e040de
| 3,772
|
py
|
Python
|
course_flow/utils.py
|
SALTISES4/CourseFlow
|
a55b5966acd6babf688224ec2f57b2ff59545272
|
[
"Apache-2.0"
] | null | null | null |
course_flow/utils.py
|
SALTISES4/CourseFlow
|
a55b5966acd6babf688224ec2f57b2ff59545272
|
[
"Apache-2.0"
] | 2
|
2021-04-20T17:38:27.000Z
|
2021-12-12T21:38:27.000Z
|
course_flow/utils.py
|
SALTISES4/CourseFlow
|
a55b5966acd6babf688224ec2f57b2ff59545272
|
[
"Apache-2.0"
] | null | null | null |
import time
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from course_flow import models
owned_throughmodels = [
"node",
"nodeweek",
"week",
"weekworkflow",
"workflow",
"workflowproject",
"project",
"columnworkflow",
"workflow",
"workflowproject",
"project",
"outcome",
"outcomeoutcome",
"outcome",
]
def get_model_from_str(model_str: str):
return ContentType.objects.get(model=model_str).model_class()
def get_parent_model_str(model_str: str) -> str:
return owned_throughmodels[owned_throughmodels.index(model_str) + 1]
def get_parent_model(model_str: str):
return ContentType.objects.get(
model=get_parent_model_str(model_str)
).model_class()
def linkIDMap(link):
return link.id
def get_project_outcomes(project):
# this should probably be replaced with a single recursive raw sql call...
# but not by me
outcomes = project.outcomes.all()
for outcome in outcomes:
outcomes = outcomes | get_descendant_outcomes(outcome)
return outcomes
def get_descendant_outcomes(outcome):
return models.Outcome.objects.filter(
Q(parent_outcomes=outcome)
| Q(parent_outcomes__parent_outcomes=outcome)
)
def get_all_outcomes_for_outcome(outcome):
outcomes = models.Outcome.objects.filter(
Q(parent_outcomes=outcome)
| Q(parent_outcomes__parent_outcomes=outcome)
).prefetch_related("outcome_horizontal_links", "child_outcome_links")
outcomeoutcomes = models.OutcomeOutcome.objects.filter(
Q(parent=outcome) | Q(parent__parent_outcomes=outcome)
)
return outcomes, outcomeoutcomes
def get_all_outcomes_for_workflow(workflow):
outcomes = models.Outcome.objects.filter(
Q(workflow=workflow)
| Q(parent_outcomes__workflow=workflow)
| Q(parent_outcomes__parent_outcomes__workflow=workflow)
).prefetch_related("outcome_horizontal_links", "child_outcome_links")
outcomeoutcomes = models.OutcomeOutcome.objects.filter(
Q(parent__workflow=workflow)
| Q(parent__parent_outcomes__workflow=workflow)
)
return outcomes, outcomeoutcomes
def get_unique_outcomenodes(node):
exclude_outcomes = models.Outcome.objects.filter(
Q(parent_outcomes__node=node)
| Q(parent_outcomes__parent_outcomes__node=node)
)
return node.outcomenode_set.exclude(outcome__in=exclude_outcomes).order_by(
"outcome__parent_outcome_links__parent__parent_outcome_links__parent__outcomeworkflow__rank",
"outcome__parent_outcome_links__parent__outcomeworkflow__rank",
"outcome__outcomeworkflow__rank",
"outcome__parent_outcome_links__parent__parent_outcome_links__rank",
"outcome__parent_outcome_links__rank"
)
def get_unique_outcomehorizontallinks(outcome):
exclude_outcomes = models.Outcome.objects.filter(
Q(parent_outcomes__reverse_horizontal_outcomes=outcome)
| Q(
parent_outcomes__parent_outcomes__reverse_horizontal_outcomes=outcome
)
)
return outcome.outcome_horizontal_links.exclude(
parent_outcome__in=exclude_outcomes
).order_by(
"parent_outcome__parent_outcome_links__parent__parent_outcome_links__parent__outcomeworkflow__rank",
"parent_outcome__parent_outcome_links__parent__outcomeworkflow__rank",
"parent_outcome__outcomeworkflow__rank",
"parent_outcome__parent_outcome_links__parent__parent_outcome_links__rank",
"parent_outcome__parent_outcome_links__rank"
)
def benchmark(identifier, last_time):
current_time = time.time()
print("Completed " + identifier + " in " + str(current_time - last_time))
return current_time
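# Illustrative use of benchmark() for ad-hoc profiling (the identifier and the
# surrounding call are hypothetical):
#
#     last = time.time()
#     outcomes, outcomeoutcomes = get_all_outcomes_for_workflow(workflow)
#     last = benchmark("outcome fetch", last)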
| 30.918033
| 108
| 0.746819
|
bd9e5c8c5d7463dd91d4632d6d53f8476c6357c8
| 781
|
py
|
Python
|
recipes/bwa/run_test.py
|
faircloth-lab/conda-recipes
|
75a520a75a357ea47ee80262f3c3a6dfe1b0715f
|
[
"BSD-3-Clause"
] | 2
|
2018-08-02T22:40:59.000Z
|
2018-08-30T02:58:49.000Z
|
recipes/bwa/run_test.py
|
faircloth-lab/conda-recipes
|
75a520a75a357ea47ee80262f3c3a6dfe1b0715f
|
[
"BSD-3-Clause"
] | 1
|
2015-10-21T13:53:30.000Z
|
2015-10-21T17:14:20.000Z
|
recipes/bwa/run_test.py
|
faircloth-lab/conda-recipes
|
75a520a75a357ea47ee80262f3c3a6dfe1b0715f
|
[
"BSD-3-Clause"
] | 6
|
2015-09-21T13:37:06.000Z
|
2020-03-26T14:42:57.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
File: run_test.py
Author: Brant Faircloth
Created by Brant Faircloth on 23 December 2013 19:08 PST (-0800)
Copyright (c) 2013 Brant C. Faircloth. All rights reserved.
"""
import os
import unittest
import subprocess
class TestAlignments(unittest.TestCase):
def test_samtools(self):
cmd = [os.path.join(os.path.join(os.environ["PREFIX"], "bin", "bwa"))]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.stdout, self.stderr = proc.communicate()
stderr = self.stderr.split("\n")
assert stderr[1] == "Program: bwa (alignment via Burrows-Wheeler transformation)"
assert stderr[2] == "Version: 0.7.7-r441"
if __name__ == '__main__':
unittest.main()
| 27.892857
| 89
| 0.682458
|
84944e6b3e324454c4e18ffe2ff9cb73673b53a7
| 1,468
|
py
|
Python
|
AD9/api.py
|
ReneCapella/pythonTinkering
|
93a5080e8ef6f67fe8ca0b67eb1fb27370beb26a
|
[
"MIT"
] | null | null | null |
AD9/api.py
|
ReneCapella/pythonTinkering
|
93a5080e8ef6f67fe8ca0b67eb1fb27370beb26a
|
[
"MIT"
] | null | null | null |
AD9/api.py
|
ReneCapella/pythonTinkering
|
93a5080e8ef6f67fe8ca0b67eb1fb27370beb26a
|
[
"MIT"
] | null | null | null |
# Start with the code in a given example
#
# Add two more features:
#
# 1. a way for the user to focus the query on one or more selected keywords
# 2. improved output for the query results
# Upload your improved code as a .py file for testing.
import requests
import json
categories_response = requests.get("https://api.publicapis.org/categories")
val = json.dumps(categories_response.json())
val1 = json.loads(val)
selection = 0
while selection < 1 or selection > 2:
selection = input("""
Please choose an option from the menu below:\n
1 - Search APIs by Category
2 - Some random API
""")
selection = int(selection)
if selection == 1:
categories = val1["categories"]
txt = ", "
cat_str = txt.join(categories)
print("Here are the categories to choose from:\n" + cat_str)
choice = ""
while not choice in categories:
choice = input("Please input a category from the list: ")
response = requests.get("https://api.publicapis.org/entries?category=" + choice)
else:
response = requests.get("https://api.publicapis.org/random")
def jprint(obj):
# create a formatted string of the Python JSON object
print("+++++++++ API +++++++++")
val = json.dumps(obj)
val1 = json.loads(val)
for entry in val1["entries"]:
print("--------------------")
for key in val1["entries"][0]:
print(key, ": " + str(val1["entries"][0][key]))
jprint(response.json())
| 29.959184
| 84
| 0.638283
|
0fa49ae3eaa303a6bb909ced394faac6e6f20ad8
| 5,731
|
py
|
Python
|
tests/test_util.py
|
QianWanghhu/SALib
|
95a3371e503f9253cb917b8f0101c0202b969c2b
|
[
"MIT"
] | 1
|
2019-12-20T00:32:45.000Z
|
2019-12-20T00:32:45.000Z
|
tests/test_util.py
|
QianWanghhu/SALib
|
95a3371e503f9253cb917b8f0101c0202b969c2b
|
[
"MIT"
] | null | null | null |
tests/test_util.py
|
QianWanghhu/SALib
|
95a3371e503f9253cb917b8f0101c0202b969c2b
|
[
"MIT"
] | null | null | null |
from pytest import raises
from numpy.testing import assert_equal, assert_allclose
import numpy as np
import pytest
from SALib.util import (read_param_file, _scale_samples, _unscale_samples,
compute_groups_matrix)
from SALib.sample import latin
@pytest.fixture(scope='function')
def setup_param_file_group_dist(make_temporary_file):
filename = make_temporary_file
with open(filename, "w") as ofile:
ofile.write("Test1 0.0 100.0 Group1 unif\n")
ofile.write("Test2 5.0 51.0 Group1 triang\n")
ofile.write("Test3 10.0 1.0 Group2 norm\n")
return filename
@pytest.fixture(scope='function')
def setup_csv_param_file_space(make_temporary_file):
filename = make_temporary_file
with open(filename, "w") as ofile:
ofile.write("Test 1,0.0,100.0\n")
ofile.write("Test 2,5.0,51.0\n")
return filename
@pytest.fixture(scope='function')
def setup_tab_param_file_espace_names(make_temporary_file):
filename = make_temporary_file
with open(filename, "w") as ofile:
ofile.write("Test 1\t0.0\t100.0\n")
ofile.write("Test 2\t5.0\t51.0\n")
return filename
@pytest.fixture(scope='function')
def setup_csv_param_file_space_comments(make_temporary_file):
filename = make_temporary_file
with open(filename, "w") as ofile:
ofile.write("# Here is a comment\n")
ofile.write("'Test 1',0.0,100.0\n")
ofile.write("'Test 2',5.0,51.0\n")
return filename
def test_readfile(setup_function):
"""
Tests a standard parameter file is read correctly
"""
filename = setup_function
pf = read_param_file(filename)
assert_equal(pf['bounds'], [[0, 100], [5, 51]])
assert_equal(pf['num_vars'], 2)
assert_equal(pf['names'], ['Test1', 'Test2'])
def test_readfile_group_dist(setup_param_file_group_dist):
"""
Tests a parameter file with groups and distributions is read correctly
"""
filename = setup_param_file_group_dist
pf = read_param_file(filename)
assert_equal(pf['bounds'], [[0, 100], [5, 51], [10, 1]])
assert_equal(pf['num_vars'], 3)
assert_equal(pf['names'], ['Test1', 'Test2', 'Test3'])
assert_equal(pf['groups'], ['Group1', 'Group1', 'Group2'])
assert_equal(pf['dists'], ['unif', 'triang', 'norm'])
def test_csv_readfile_with_whitespace(setup_csv_param_file_space):
"""
A comma delimited parameter file with whitespace in the names
"""
filename = setup_csv_param_file_space
pf = read_param_file(filename)
assert_equal(pf['bounds'], [[0, 100], [5, 51]])
assert_equal(pf['num_vars'], 2)
assert_equal(pf['names'], ['Test 1', 'Test 2'])
def test_tab_readfile_whitespace(setup_tab_param_file_espace_names):
"""
A tab delimited parameter file with whitespace in the names
"""
filename = setup_tab_param_file_espace_names
pf = read_param_file(filename)
assert_equal(pf['bounds'], [[0, 100], [5, 51]])
assert_equal(pf['num_vars'], 2)
assert_equal(pf['names'], ['Test 1', 'Test 2'])
def test_csv_readfile_comments(setup_csv_param_file_space_comments):
"""
"""
filename = setup_csv_param_file_space_comments
pf = read_param_file(filename)
print(pf['bounds'], pf['num_vars'], pf['names'])
assert_equal(pf['bounds'], [[0, 100], [5, 51]])
assert_equal(pf['num_vars'], 2)
assert_equal(pf['names'], ['Test 1', 'Test 2'])
# Test scale samples
def test_scale_samples():
"""
Simple test to ensure that samples are correctly scaled
"""
params = np.arange(0, 1.1, 0.1).repeat(2).reshape((11, 2))
bounds = [[10, 20], [-10, 10]]
desired = np.array(
[np.arange(10, 21, 1), np.arange(-10, 12, 2)], dtype=np.float).T
_scale_samples(params, bounds)
assert_allclose(params, desired, atol=1e-03, rtol=1e-03)
def test_unscale_samples():
"""
Simple test to unscale samples back to [0,1] range
"""
params = np.array(
[np.arange(10, 21, 1), np.arange(-10, 12, 2)], dtype=np.float).T
bounds = [[10, 20], [-10, 10]]
desired = np.arange(0, 1.1, 0.1).repeat(2).reshape((11, 2))
_unscale_samples(params, bounds)
assert_allclose(params, desired, atol=1e-03, rtol=1e-03)
def test_scale_samples_upper_lt_lower():
"""
Raise ValueError if upper bound lower than lower bound
"""
params = np.array([[0, 0], [0.1, 0.1], [0.2, 0.2]])
bounds = [[10, 9], [-10, 10]]
with raises(ValueError):
_scale_samples(params, bounds)
def test_scale_samples_upper_eq_lower():
"""
Raise ValueError if upper bound lower equal to lower bound
"""
params = np.array([[0, 0], [0.1, 0.1], [0.2, 0.2]])
bounds = [[10, 10], [-10, 10]]
with raises(ValueError):
_scale_samples(params, bounds)
def test_compute_groups_from_parameter_file():
"""
Tests that a group file is read correctly
"""
actual_matrix, actual_unique_names = \
compute_groups_matrix(['Group 1', 'Group 2', 'Group 2'])
assert_equal(actual_matrix, np.array(
[[1, 0], [0, 1], [0, 1]], dtype=np.int))
assert_equal(actual_unique_names, ['Group 1', 'Group 2'])
def test_nonuniform_scale_samples_truncnorm():
"""
Test the rescaling of samples for truncated normal distribution
"""
problem = {
'num_vars': 1,
'dists': ['truncnorm'],
'bounds': [[0, 3.14, 2, 1]],
'names': ['x1']
}
actual = latin.sample(problem, 10, seed=42)
expected = np.array(
[[2.68693037], [1.34115848], [0.39811064],
[2.09477163], [2.49999031], [3.028063],
[1.5564238], [1.11686499], [1.68414443],
[1.9022482]]
)
np.testing.assert_allclose(actual, expected)
| 29.239796
| 74
| 0.645437
|
c16da485b57ad27e5f272c51a57967e817d5e5ee
| 2,692
|
py
|
Python
|
homeassistant/components/plex/services.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 6
|
2016-11-25T06:36:27.000Z
|
2021-11-16T11:20:23.000Z
|
homeassistant/components/plex/services.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 52
|
2020-07-14T14:12:26.000Z
|
2022-03-31T06:24:02.000Z
|
homeassistant/components/plex/services.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 2
|
2020-05-11T00:38:26.000Z
|
2021-01-15T13:23:44.000Z
|
"""Services for the Plex integration."""
import logging
from plexapi.exceptions import NotFound
import voluptuous as vol
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
DOMAIN,
PLEX_UPDATE_PLATFORMS_SIGNAL,
SERVERS,
SERVICE_REFRESH_LIBRARY,
SERVICE_SCAN_CLIENTS,
)
REFRESH_LIBRARY_SCHEMA = vol.Schema(
{vol.Optional("server_name"): str, vol.Required("library_name"): str}
)
_LOGGER = logging.getLogger(__package__)
async def async_setup_services(hass):
"""Set up services for the Plex component."""
async def async_refresh_library_service(service_call):
await hass.async_add_executor_job(refresh_library, hass, service_call)
async def async_scan_clients_service(_):
_LOGGER.debug("Scanning for new Plex clients")
for server_id in hass.data[DOMAIN][SERVERS]:
async_dispatcher_send(hass, PLEX_UPDATE_PLATFORMS_SIGNAL.format(server_id))
hass.services.async_register(
DOMAIN,
SERVICE_REFRESH_LIBRARY,
async_refresh_library_service,
schema=REFRESH_LIBRARY_SCHEMA,
)
hass.services.async_register(
DOMAIN, SERVICE_SCAN_CLIENTS, async_scan_clients_service
)
return True
def refresh_library(hass, service_call):
"""Scan a Plex library for new and updated media."""
plex_server_name = service_call.data.get("server_name")
library_name = service_call.data["library_name"]
plex_server = get_plex_server(hass, plex_server_name)
if not plex_server:
return
try:
library = plex_server.library.section(title=library_name)
except NotFound:
_LOGGER.error(
"Library with name '%s' not found in %s",
library_name,
[x.title for x in plex_server.library.sections()],
)
return
_LOGGER.debug("Scanning %s for new and updated media", library_name)
library.update()
def get_plex_server(hass, plex_server_name=None):
"""Retrieve a configured Plex server by name."""
plex_servers = hass.data[DOMAIN][SERVERS].values()
if plex_server_name:
plex_server = [x for x in plex_servers if x.friendly_name == plex_server_name]
if not plex_server:
_LOGGER.error(
"Requested Plex server '%s' not found in %s",
plex_server_name,
[x.friendly_name for x in plex_servers],
)
return None
elif len(plex_servers) == 1:
return next(iter(plex_servers))
_LOGGER.error(
"Multiple Plex servers configured and no selection made: %s",
[x.friendly_name for x in plex_servers],
)
return None
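# Illustrative service call matching REFRESH_LIBRARY_SCHEMA above (YAML as
# used from a Home Assistant automation; this assumes SERVICE_REFRESH_LIBRARY
# is "refresh_library", and the server/library names are hypothetical):
#
#     service: plex.refresh_library
#     data:
#       server_name: "Living Room Plex"
#       library_name: "Movies"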
| 29.26087
| 87
| 0.682764
|
5d530c12a3d14b533198a8b290ee7d9238b148c4
| 2,330
|
py
|
Python
|
oa/plugins/uri_detail.py
|
Worteks/OrangeAssassin
|
21baf0b84fbedd887f6d88e13c624f14fb0b5e06
|
[
"Apache-2.0"
] | null | null | null |
oa/plugins/uri_detail.py
|
Worteks/OrangeAssassin
|
21baf0b84fbedd887f6d88e13c624f14fb0b5e06
|
[
"Apache-2.0"
] | null | null | null |
oa/plugins/uri_detail.py
|
Worteks/OrangeAssassin
|
21baf0b84fbedd887f6d88e13c624f14fb0b5e06
|
[
"Apache-2.0"
] | null | null | null |
"""URIDetail Plugin."""
from __future__ import absolute_import
try:
from urllib.parse import unquote
from urllib.parse import urlparse
except ImportError:
from urllib import unquote
from urlparse import urlparse
import oa.regex
import oa.rules.uri
import oa.plugins.base
import oa.html_parser
URI_DRREG = oa.regex.Regex(
r"(?P<key>\w*)\s+(?P<op>[\=\!\~]{1,2})\s+(?P<regex>/.*?/)")
class URIDetailRule(oa.rules.uri.URIRule):
"""Implements the uri_detail rule
"""
_rule_type = "uri_detail"
def __init__(self, name, pattern, score=None, desc=None):
super(URIDetailRule, self).__init__(name, pattern, score, desc)
def check_single_item(self, value):
"""Checks one item agains the patterns, return True if all
matches.
"""
for key, regex in self._pattern:
if key not in value:
# Does not match...
return False
data = value[key]
match = regex.match(data)
# All items should match to return True.
if not match:
return False
return True
def match(self, msg):
for key in msg.uri_detail_links:
for type in msg.uri_detail_links[key]:
value = msg.uri_detail_links[key][type]
result = self.check_single_item(value)
if result:
# At least this link match, return True
return True
return False
@staticmethod
def get_rule_kwargs(data):
rule_value = data["value"]
checks = URI_DRREG.findall(rule_value)
patterns = []
for key, oper, regex in checks:
pyregex = oa.regex.perl2re(regex, oper)
patterns.append((key, pyregex))
kwargs = {"pattern": patterns}
return kwargs
class URIDetailPlugin(oa.plugins.base.BasePlugin):
"""Implements URIDetail plugin.
"""
options = {'uri_detail': ("list", [])}
cmds = {"uri_detail": URIDetailRule}
def __init__(self, *args, **kwargs):
super(URIDetailPlugin, self).__init__(*args, **kwargs)
def parsed_metadata(self, msg):
"""Goes through the URIs, parse them and store them locally in the
message"""
oa.html_parser.parsed_metadata(msg, self.ctxt)
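# Illustrative rule parsing (the keys "domain" and "scheme" are hypothetical;
# in practice they must match the keys that oa.html_parser stores in
# msg.uri_detail_links):
#
#     data = {"value": r'domain =~ /example\.com$/ scheme =~ /^https?$/'}
#     URIDetailRule.get_rule_kwargs(data)
#     # -> {"pattern": [("domain", <compiled re>), ("scheme", <compiled re>)]}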
| 29.493671
| 74
| 0.6
|
1f64e7369e2d30f5c90fb9a3b22d2fa8ae997ad8
| 1,339
|
py
|
Python
|
baselines/scikit_knn.py
|
dericp/digit-recognition
|
089458ea06f35af0adb809185f1edc60bb7b2674
|
[
"MIT"
] | null | null | null |
baselines/scikit_knn.py
|
dericp/digit-recognition
|
089458ea06f35af0adb809185f1edc60bb7b2674
|
[
"MIT"
] | null | null | null |
baselines/scikit_knn.py
|
dericp/digit-recognition
|
089458ea06f35af0adb809185f1edc60bb7b2674
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from matplotlib import pyplot as plt
K_VALS = [1, 5, 25, 125, 625]
df_train = pd.read_csv("train.csv", nrows = 1000)
df_train['intercept'] = 1
trainingData = df_train.drop("label", axis = 1).values
trainingResults = df_train["label"].values
errors = []
for i in range(len(K_VALS)):
k = K_VALS[i]
# build the validation set
start_index = i * len(trainingData)//len(K_VALS)
end_index = len(trainingData)//len(K_VALS) * (i + 1)
validation_data = trainingData[start_index:end_index]
validation_classifications = trainingResults[start_index:end_index]
# build the model
model = np.concatenate((trainingData[:start_index], trainingData[end_index:]), axis=0)
model_classifications = np.concatenate((trainingResults[:start_index], trainingResults[end_index:]), axis=0)
classifier = KNeighborsClassifier(n_neighbors=k, weights='distance')
classifier.fit(model, model_classifications)
score = classifier.score(validation_data, validation_classifications)
errors.append(1 - score)
plt.plot(K_VALS, errors)
plt.title("K vs. Classification Error")
plt.xlabel("k value")
plt.xscale('log')
plt.ylabel("classification error")
plt.savefig('k-nn-libraryimpl.png')
plt.show()
| 30.431818
| 112
| 0.732636
|
ad880f1aaaaa85cd9972a3987f916522feb2565e
| 776
|
py
|
Python
|
nuclides/utils/fill_db.py
|
maluethi/nuclides
|
26d84214c4412849794eec6caf0bdb9bd9707aab
|
[
"MIT"
] | null | null | null |
nuclides/utils/fill_db.py
|
maluethi/nuclides
|
26d84214c4412849794eec6caf0bdb9bd9707aab
|
[
"MIT"
] | null | null | null |
nuclides/utils/fill_db.py
|
maluethi/nuclides
|
26d84214c4412849794eec6caf0bdb9bd9707aab
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from nuclides.utils.gen_db import Decays, Nuclides, Elements, Base
engine = create_engine('sqlite:///nuclides.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Insert an Element in the element table
new_element = Elements(name="Ma", Z=10, NStart=11, NRange=2)
session.add(new_element)
session.commit()
Ti = session.query(Elements).filter(Elements.Z == 10)[0]
print(Ti)
# Insert a Nuclide in the nuclides table
new_nuclide = Nuclides(Z=10, N=11, mass=1.2, stable=True, element=Ti)
session.add(new_nuclide)
session.commit()
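# Illustrative follow-up query (a sketch; the column name is taken from the
# objects created above): list every nuclide stored for Z == 10.
for nuclide in session.query(Nuclides).filter(Nuclides.Z == 10):
    print(nuclide)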
| 27.714286
| 69
| 0.770619
|
61e2ff4db578543f9f2694f239f03439bfab2c41
| 10,042
|
py
|
Python
|
sympy/physics/optics/waves.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 1
|
2020-09-09T20:40:17.000Z
|
2020-09-09T20:40:17.000Z
|
sympy/physics/optics/waves.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 14
|
2018-02-08T10:11:03.000Z
|
2019-04-16T10:32:46.000Z
|
sympy/physics/optics/waves.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 1
|
2022-02-04T13:50:29.000Z
|
2022-02-04T13:50:29.000Z
|
"""
This module has all the classes and functions related to waves in optics.
**Contains**
* TWave
"""
__all__ = ['TWave']
from sympy.core.basic import Basic
from sympy.core.expr import Expr
from sympy.core.function import Derivative, Function
from sympy.core.numbers import (Number, pi, I)
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.core.sympify import _sympify, sympify
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (atan2, cos, sin)
from sympy.physics.units import speed_of_light, meter, second
c = speed_of_light.convert_to(meter/second)
class TWave(Expr):
r"""
This is a simple transverse sine wave travelling in a one-dimensional space.
Basic properties are required at the time of creation of the object,
but they can be changed later with respective methods provided.
Explanation
===========
It is represented as :math:`A \times cos(k*x - \omega \times t + \phi )`,
where :math:`A` is the amplitude, :math:`\omega` is the angular frequency,
:math:`k` is the wavenumber (spatial frequency), :math:`x` is a spatial variable
to represent the position on the dimension on which the wave propagates,
and :math:`\phi` is the phase angle of the wave.
Arguments
=========
amplitude : Sympifyable
Amplitude of the wave.
frequency : Sympifyable
Frequency of the wave.
phase : Sympifyable
Phase angle of the wave.
time_period : Sympifyable
Time period of the wave.
n : Sympifyable
Refractive index of the medium.
Raises
=======
ValueError : When neither frequency nor time period is provided
or they are not consistent.
TypeError : When anything other than TWave objects is added.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A1, phi1, A2, phi2, f = symbols('A1, phi1, A2, phi2, f')
>>> w1 = TWave(A1, f, phi1)
>>> w2 = TWave(A2, f, phi2)
>>> w3 = w1 + w2 # Superposition of two waves
>>> w3
TWave(sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2) + A2**2), f,
atan2(A1*sin(phi1) + A2*sin(phi2), A1*cos(phi1) + A2*cos(phi2)), 1/f, n)
>>> w3.amplitude
sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2) + A2**2)
>>> w3.phase
atan2(A1*sin(phi1) + A2*sin(phi2), A1*cos(phi1) + A2*cos(phi2))
>>> w3.speed
299792458*meter/(second*n)
>>> w3.angular_velocity
2*pi*f
"""
def __new__(
cls,
amplitude,
frequency=None,
phase=S.Zero,
time_period=None,
n=Symbol('n')):
if time_period is not None:
time_period = _sympify(time_period)
_frequency = S.One/time_period
if frequency is not None:
frequency = _sympify(frequency)
_time_period = S.One/frequency
if time_period is not None:
if frequency != S.One/time_period:
raise ValueError("frequency and time_period should be consistent.")
if frequency is None and time_period is None:
raise ValueError("Either frequency or time period is needed.")
if frequency is None:
frequency = _frequency
if time_period is None:
time_period = _time_period
amplitude = _sympify(amplitude)
phase = _sympify(phase)
n = sympify(n)
obj = Basic.__new__(cls, amplitude, frequency, phase, time_period, n)
return obj
@property
def amplitude(self):
"""
Returns the amplitude of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.amplitude
A
"""
return self.args[0]
@property
def frequency(self):
"""
Returns the frequency of the wave,
in cycles per second.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.frequency
f
"""
return self.args[1]
@property
def phase(self):
"""
Returns the phase angle of the wave,
in radians.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.phase
phi
"""
return self.args[2]
@property
def time_period(self):
"""
Returns the temporal period of the wave,
in seconds per cycle.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.time_period
1/f
"""
return self.args[3]
@property
def n(self):
"""
Returns the refractive index of the medium
"""
return self.args[4]
@property
def wavelength(self):
"""
Returns the wavelength (spatial period) of the wave,
in meters per cycle.
It depends on the medium of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.wavelength
299792458*meter/(second*f*n)
"""
return c/(self.frequency*self.n)
@property
def speed(self):
"""
Returns the propagation speed of the wave,
in meters per second.
It is dependent on the propagation medium.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.speed
299792458*meter/(second*n)
"""
return self.wavelength*self.frequency
@property
def angular_velocity(self):
"""
Returns the angular velocity of the wave,
in radians per second.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.angular_velocity
2*pi*f
"""
return 2*pi*self.frequency
@property
def wavenumber(self):
"""
Returns the wavenumber of the wave,
in radians per meter.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.wavenumber
pi*second*f*n/(149896229*meter)
"""
return 2*pi/self.wavelength
def __str__(self):
"""String representation of a TWave."""
from sympy.printing import sstr
return type(self).__name__ + sstr(self.args)
__repr__ = __str__
def __add__(self, other):
"""
Addition of two waves will result in their superposition.
The type of interference will depend on their phase angles.
"""
if isinstance(other, TWave):
if self.frequency == other.frequency and self.wavelength == other.wavelength:
return TWave(sqrt(self.amplitude**2 + other.amplitude**2 + 2 *
self.amplitude*other.amplitude*cos(
self.phase - other.phase)),
self.frequency,
atan2(self.amplitude*sin(self.phase)
+ other.amplitude*sin(other.phase),
self.amplitude*cos(self.phase)
+ other.amplitude*cos(other.phase))
)
else:
raise NotImplementedError("Interference of waves with different frequencies"
" has not been implemented.")
else:
raise TypeError(type(other).__name__ + " and TWave objects cannot be added.")
def __mul__(self, other):
"""
Multiplying a wave by a scalar rescales the amplitude of the wave.
"""
other = sympify(other)
if isinstance(other, Number):
return TWave(self.amplitude*other, *self.args[1:])
else:
raise TypeError(type(other).__name__ + " and TWave objects cannot be multiplied.")
def __sub__(self, other):
return self.__add__(-1*other)
def __neg__(self):
return self.__mul__(-1)
def __radd__(self, other):
return self.__add__(other)
def __rmul__(self, other):
return self.__mul__(other)
def __rsub__(self, other):
return (-self).__radd__(other)
def _eval_rewrite_as_sin(self, *args, **kwargs):
return self.amplitude*sin(self.wavenumber*Symbol('x')
- self.angular_velocity*Symbol('t') + self.phase + pi/2, evaluate=False)
def _eval_rewrite_as_cos(self, *args, **kwargs):
return self.amplitude*cos(self.wavenumber*Symbol('x')
- self.angular_velocity*Symbol('t') + self.phase)
def _eval_rewrite_as_pde(self, *args, **kwargs):
mu, epsilon, x, t = symbols('mu, epsilon, x, t')
E = Function('E')
return Derivative(E(x, t), x, 2) + mu*epsilon*Derivative(E(x, t), t, 2)
def _eval_rewrite_as_exp(self, *args, **kwargs):
return self.amplitude*exp(I*(self.wavenumber*Symbol('x')
- self.angular_velocity*Symbol('t') + self.phase))
| 29.44868
| 94
| 0.563732
|
5c32a27c0a4ca01955275fc93125fb4cc649e952
| 5,724
|
py
|
Python
|
src/dataset.py
|
RedHenLab/punctuation-restoration
|
0f32a37cc4db37a818eb22560b31010c67a63a56
|
[
"MIT"
] | null | null | null |
src/dataset.py
|
RedHenLab/punctuation-restoration
|
0f32a37cc4db37a818eb22560b31010c67a63a56
|
[
"MIT"
] | null | null | null |
src/dataset.py
|
RedHenLab/punctuation-restoration
|
0f32a37cc4db37a818eb22560b31010c67a63a56
|
[
"MIT"
] | null | null | null |
import torch
from config import *
from augmentation import *
import numpy as np
def parse_data(file_path, tokenizer, sequence_len, token_style):
"""
:param file_path: text file path that contains tokens and punctuations separated by tab in lines
:param tokenizer: tokenizer that will be used to further tokenize word for BERT like models
:param sequence_len: maximum length of each sequence
:param token_style: For getting index of special tokens in config.TOKEN_IDX
:return: list of [tokens_index, punctuation_index, attention_masks, punctuation_mask], each having sequence_len
punctuation_mask is used to ignore special indices like padding and intermediate sub-word token during evaluation
"""
data_items = []
with open(file_path, 'r', encoding='utf-8') as f:
lines = [line for line in f.read().split('\n') if line.strip()]
idx = 0
# loop until end of the entire text
while idx < len(lines):
x = [TOKEN_IDX[token_style]['START_SEQ']]
y = [0]
y_mask = [1] # which positions we need to consider while evaluating i.e., ignore pad or sub tokens
# loop until we have required sequence length
# -1 because we will have a special end of sequence token at the end
while len(x) < sequence_len - 1 and idx < len(lines):
word, punc = lines[idx].split('\t')
tokens = tokenizer.tokenize(word)
# if taking these tokens exceeds sequence length we finish current sequence with padding
# then start next sequence from this token
if len(tokens) + len(x) >= sequence_len:
break
else:
for i in range(len(tokens) - 1):
x.append(tokenizer.convert_tokens_to_ids(tokens[i]))
y.append(0)
y_mask.append(0)
if len(tokens) > 0:
x.append(tokenizer.convert_tokens_to_ids(tokens[-1]))
else:
x.append(TOKEN_IDX[token_style]['UNK'])
y.append(punctuation_dict[punc])
y_mask.append(1)
idx += 1
x.append(TOKEN_IDX[token_style]['END_SEQ'])
y.append(0)
y_mask.append(1)
if len(x) < sequence_len:
x = x + [TOKEN_IDX[token_style]['PAD'] for _ in range(sequence_len - len(x))]
y = y + [0 for _ in range(sequence_len - len(y))]
y_mask = y_mask + [0 for _ in range(sequence_len - len(y_mask))]
attn_mask = [1 if token != TOKEN_IDX[token_style]['PAD'] else 0 for token in x]
data_items.append([x, y, attn_mask, y_mask])
return data_items
class Dataset(torch.utils.data.Dataset):
def __init__(self, files, tokenizer, sequence_len, token_style, is_train=False, augment_rate=0.1,
augment_type='substitute'):
"""
:param files: single file or list of text files containing tokens and punctuations separated by tab in lines
:param tokenizer: tokenizer that will be used to further tokenize word for BERT like models
:param sequence_len: length of each sequence
:param token_style: For getting index of special tokens in config.TOKEN_IDX
:param augment_rate: token augmentation rate when preparing data
:param is_train: if false do not apply augmentation
"""
if isinstance(files, list):
self.data = []
for file in files:
self.data += parse_data(file, tokenizer, sequence_len, token_style)
else:
self.data = parse_data(files, tokenizer, sequence_len, token_style)
self.sequence_len = sequence_len
self.augment_rate = augment_rate
self.token_style = token_style
self.is_train = is_train
self.augment_type = augment_type
def __len__(self):
return len(self.data)
def _augment(self, x, y, y_mask):
x_aug = []
y_aug = []
y_mask_aug = []
for i in range(len(x)):
r = np.random.rand()
if r < self.augment_rate:
AUGMENTATIONS[self.augment_type](x, y, y_mask, x_aug, y_aug, y_mask_aug, i, self.token_style)
else:
x_aug.append(x[i])
y_aug.append(y[i])
y_mask_aug.append(y_mask[i])
if len(x_aug) > self.sequence_len:
# len increased due to insert
x_aug = x_aug[0:self.sequence_len]
y_aug = y_aug[0:self.sequence_len]
y_mask_aug = y_mask_aug[0:self.sequence_len]
elif len(x_aug) < self.sequence_len:
# len decreased due to delete
x_aug = x_aug + [TOKEN_IDX[self.token_style]['PAD'] for _ in range(self.sequence_len - len(x_aug))]
y_aug = y_aug + [0 for _ in range(self.sequence_len - len(y_aug))]
y_mask_aug = y_mask_aug + [0 for _ in range(self.sequence_len - len(y_mask_aug))]
attn_mask = [1 if token != TOKEN_IDX[self.token_style]['PAD'] else 0 for token in x]
return x_aug, y_aug, attn_mask, y_mask_aug
def __getitem__(self, index):
x = self.data[index][0]
y = self.data[index][1]
attn_mask = self.data[index][2]
y_mask = self.data[index][3]
if self.is_train and self.augment_rate > 0:
x, y, attn_mask, y_mask = self._augment(x, y, y_mask)
x = torch.tensor(x)
y = torch.tensor(y)
attn_mask = torch.tensor(attn_mask)
y_mask = torch.tensor(y_mask)
return x, y, attn_mask, y_mask
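# A minimal usage sketch, assuming the Hugging Face `transformers` package and a
# tab-separated "<word>\t<punctuation>" training file; the path, model name,
# token_style and hyper-parameters below are placeholders, not values from this project.
if __name__ == '__main__':
    from transformers import BertTokenizer
    bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    train_set = Dataset('data/train.tsv', tokenizer=bert_tokenizer, sequence_len=256,
                        token_style='bert', is_train=True, augment_rate=0.1)
    loader = torch.utils.data.DataLoader(train_set, batch_size=8, shuffle=True)
    x, y, attn_mask, y_mask = next(iter(loader))  # each tensor has shape (8, 256)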
| 44.372093
| 117
| 0.596087
|
7d0c6c4f62cc46a41b10b45361d77105b1fd49ce
| 6,830
|
py
|
Python
|
extra_apps/xadmin/plugins/editable.py
|
txqzzz/831net-backend
|
c73167124b6a10a774e873389900d31fb15a842c
|
[
"CC0-1.0"
] | null | null | null |
extra_apps/xadmin/plugins/editable.py
|
txqzzz/831net-backend
|
c73167124b6a10a774e873389900d31fb15a842c
|
[
"CC0-1.0"
] | null | null | null |
extra_apps/xadmin/plugins/editable.py
|
txqzzz/831net-backend
|
c73167124b6a10a774e873389900d31fb15a842c
|
[
"CC0-1.0"
] | null | null | null |
from django import template
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.db import models, transaction
from django.forms.models import modelform_factory
from django.http import Http404, HttpResponse
from django.utils.encoding import force_text, smart_text
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from xadmin.layout import FormHelper
from xadmin.plugins.ajax import JsonErrorDict
from xadmin.sites import site
from xadmin.util import lookup_field, display_for_field, label_for_field, unquote, boolean_icon
from xadmin.views import BaseAdminPlugin, ModelFormAdminView, ListAdminView
from xadmin.views.base import csrf_protect_m, filter_hook
from xadmin.views.edit import ModelFormAdminUtil
from xadmin.views.list import EMPTY_CHANGELIST_VALUE
class EditablePlugin(BaseAdminPlugin):
list_editable = []
def __init__(self, admin_view):
super(EditablePlugin, self).__init__(admin_view)
self.editable_need_fields = {}
def init_request(self, *args, **kwargs):
active = bool(self.request.method == 'GET' and self.admin_view.has_change_permission() and self.list_editable)
if active:
self.model_form = self.get_model_view(ModelFormAdminUtil, self.model).form_obj
return active
def result_item(self, item, obj, field_name, row):
if self.list_editable and item.field and item.field.editable and (field_name in self.list_editable):
pk = getattr(obj, obj._meta.pk.attname)
field_label = label_for_field(field_name, obj,
model_admin=self.admin_view,
return_attr=False
)
item.wraps.insert(0, '<span class="editable-field">%s</span>')
item.btns.append((
'<a class="editable-handler" title="%s" data-editable-field="%s" data-editable-loadurl="%s">' +
'<i class="fa fa-edit"></i></a>') %
(_(u"Enter %s") % field_label, field_name,
self.admin_view.model_admin_url('patch', pk) + '?fields=' + field_name))
if field_name not in self.editable_need_fields:
self.editable_need_fields[field_name] = item.field
return item
# Media
def get_media(self, media):
if self.editable_need_fields:
media = media + self.model_form.media + \
self.vendor(
'xadmin.plugin.editable.js', 'xadmin.widget.editable.css')
return media
class EditPatchView(ModelFormAdminView, ListAdminView):
def init_request(self, object_id, *args, **kwargs):
self.org_obj = self.get_object(unquote(object_id))
# For list view get new field display html
self.pk_attname = self.opts.pk.attname
if not self.has_change_permission(self.org_obj):
raise PermissionDenied
if self.org_obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(self.opts.verbose_name), 'key': escape(object_id)})
def get_new_field_html(self, f):
result = self.result_item(self.org_obj, f, {'is_display_first':
False, 'object': self.org_obj})
return mark_safe(result.text) if result.allow_tags else conditional_escape(result.text)
def _get_new_field_html(self, field_name):
try:
f, attr, value = lookup_field(field_name, self.org_obj, self)
except (AttributeError, ObjectDoesNotExist):
return EMPTY_CHANGELIST_VALUE
else:
allow_tags = False
if f is None:
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
text = boolean_icon(value)
else:
text = smart_text(value)
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(self.org_obj, f.name)
if field_val is None:
text = EMPTY_CHANGELIST_VALUE
else:
text = field_val
else:
text = display_for_field(value, f)
return mark_safe(text) if allow_tags else conditional_escape(text)
@filter_hook
def get(self, request, object_id):
model_fields = [f.name for f in self.opts.fields]
fields = [f for f in request.GET['fields'].split(',') if f in model_fields]
defaults = {
"form": self.form,
"fields": fields,
"formfield_callback": self.formfield_for_dbfield,
}
form_class = modelform_factory(self.model, **defaults)
form = form_class(instance=self.org_obj)
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
form.helper = helper
s = '{% load i18n crispy_forms_tags %}<form method="post" action="{{action_url}}">{% crispy form %}' + \
'<button type="submit" class="btn btn-success btn-block btn-sm">{% trans "Apply" %}</button></form>'
t = template.Template(s)
c = template.Context({'form': form, 'action_url': self.model_admin_url('patch', self.org_obj.pk)})
return HttpResponse(t.render(c))
@filter_hook
@csrf_protect_m
@transaction.atomic
def post(self, request, object_id):
model_fields = [f.name for f in self.opts.fields]
fields = [f for f in request.POST.keys() if f in model_fields]
defaults = {
"form": self.form,
"fields": fields,
"formfield_callback": self.formfield_for_dbfield,
}
form_class = modelform_factory(self.model, **defaults)
form = form_class(
instance=self.org_obj, data=request.POST, files=request.FILES)
result = {}
if form.is_valid():
form.save(commit=True)
result['result'] = 'success'
result['new_data'] = form.cleaned_data
result['new_html'] = dict(
[(f, self.get_new_field_html(f)) for f in fields])
else:
result['result'] = 'error'
result['errors'] = JsonErrorDict(form.errors, form).as_json()
return self.render_response(result)
site.register_plugin(EditablePlugin, ListAdminView)
site.register_modelview(r'^(.+)/patch/$', EditPatchView, name='%s_%s_patch')
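# A rough sketch of how a project might opt in to this plugin from its own
# adminx.py; "Post" and its fields are illustrative names only, and the exact
# registration call should follow whatever pattern the project already uses:
#
#     import xadmin
#
#     class PostAdmin(object):
#         list_display = ('title', 'status', 'updated_at')
#         list_editable = ('title', 'status')  # columns handled by EditablePlugin above
#
#     xadmin.site.register(Post, PostAdmin)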
| 42.160494
| 132
| 0.609224
|
7e4a24ffdfe6aad67bc9196651e1727fe83d2574
| 2,645
|
py
|
Python
|
homeassistant/components/camera/generic.py
|
maddox/home-assistant
|
6624cfefd6ea81b559085779173b91a3dc6bd349
|
[
"MIT"
] | 1
|
2015-09-13T21:10:09.000Z
|
2015-09-13T21:10:09.000Z
|
homeassistant/components/camera/generic.py
|
maddox/home-assistant
|
6624cfefd6ea81b559085779173b91a3dc6bd349
|
[
"MIT"
] | null | null | null |
homeassistant/components/camera/generic.py
|
maddox/home-assistant
|
6624cfefd6ea81b559085779173b91a3dc6bd349
|
[
"MIT"
] | 1
|
2020-05-07T08:48:36.000Z
|
2020-05-07T08:48:36.000Z
|
"""
homeassistant.components.camera.generic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for IP Cameras.
This component provides basic support for IP cameras. For the basic support to
work your camera must support accessing a JPEG snapshot via a URL and you will
need to specify the "still_image_url" parameter which should be the location of
the JPEG image.
As part of the basic support the following features will be provided:
- MJPEG video streaming
- Saving a snapshot
- Recording(JPEG frame capture)
To use this component, add the following to your configuration.yaml file.
camera:
platform: generic
name: Door Camera
username: YOUR_USERNAME
password: YOUR_PASSWORD
still_image_url: http://YOUR_CAMERA_IP_AND_PORT/image.jpg
Variables:
still_image_url
*Required
The URL your camera serves the image on, e.g. http://192.168.1.21:2112/
name
*Optional
This parameter allows you to override the name of your camera in Home
Assistant.
username
*Optional
The username for accessing your camera.
password
*Optional
The password for accessing your camera.
"""
import logging
from requests.auth import HTTPBasicAuth
from homeassistant.helpers import validate_config
from homeassistant.components.camera import DOMAIN
from homeassistant.components.camera import Camera
import requests
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Adds a generic IP Camera. """
if not validate_config({DOMAIN: config}, {DOMAIN: ['still_image_url']},
_LOGGER):
return None
add_devices_callback([GenericCamera(config)])
# pylint: disable=too-many-instance-attributes
class GenericCamera(Camera):
"""
A generic implementation of an IP camera that is reachable over a URL.
"""
def __init__(self, device_info):
super().__init__()
self._name = device_info.get('name', 'Generic Camera')
self._username = device_info.get('username')
self._password = device_info.get('password')
self._still_image_url = device_info['still_image_url']
def camera_image(self):
""" Return a still image reponse from the camera. """
if self._username and self._password:
response = requests.get(
self._still_image_url,
auth=HTTPBasicAuth(self._username, self._password))
else:
response = requests.get(self._still_image_url)
return response.content
@property
def name(self):
""" Return the name of this device. """
return self._name
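# A minimal sketch of standalone use, outside Home Assistant's normal setup flow;
# the URL and name are placeholders and would normally come from configuration.yaml:
#
#     cam = GenericCamera({'name': 'Door Camera',
#                          'still_image_url': 'http://192.168.1.21:2112/image.jpg'})
#     jpeg_bytes = cam.camera_image()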
| 28.75
| 79
| 0.710019
|
9282732ba80c8819a92ca826149f803d6f25a730
| 1,060
|
py
|
Python
|
jeu_educatif/python/c_vigenere.py
|
Charles-Svg/Projet_L3_jeu_educatif
|
841f70f1368117288128342258f5832ca9028161
|
[
"MIT"
] | null | null | null |
jeu_educatif/python/c_vigenere.py
|
Charles-Svg/Projet_L3_jeu_educatif
|
841f70f1368117288128342258f5832ca9028161
|
[
"MIT"
] | null | null | null |
jeu_educatif/python/c_vigenere.py
|
Charles-Svg/Projet_L3_jeu_educatif
|
841f70f1368117288128342258f5832ca9028161
|
[
"MIT"
] | null | null | null |
# This document is encrypted with the Vigenère algorithm
# You must write a Vigenère DECRYPTION function (not an encryption one) taking as parameters:
# -A word/sentence to decrypt (in lowercase)
# -A key given as a string of characters (in lowercase)
# and returning the decrypted word (in lowercase)
#
# The function definition is already given to you; it is up to you to implement it
#
# You must also find the value of the variable cle. You may find it by digging through the contents of your PC
#
# Hints:
# - the functions ord(<char>)->int and chr(<int>)->char will be useful
# - in ASCII, the codes of lowercase letters range from 97 (a) to 122 (z)
# - the modulo operator (written "%") will be useful to loop over the key indices
# - you are not asked to handle punctuation, but you must handle spaces
# - to get the length of the key, you can use len(key)
# To be completed
cle = None
def vigenere(word, key):
    # To be completed
return
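# One possible reference sketch of the decryption, shown only as guidance: it keeps
# spaces untouched and advances the key index on letters only, which is one of
# several reasonable conventions. The exercise still expects you to write your own
# vigenere() above and to find the value of `cle` yourself.
def vigenere_reference(word, key):
    decrypted = []
    key_index = 0  # advanced only when a letter is decrypted
    for char in word:
        if char == ' ':
            decrypted.append(char)
        else:
            shift = ord(key[key_index % len(key)]) - 97
            decrypted.append(chr((ord(char) - 97 - shift) % 26 + 97))
            key_index += 1
    return ''.join(decrypted)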
| 42.4
| 125
| 0.728302
|
e4c607b0f86a6fbc51108ab1f5c4814ef616d921
| 1,158
|
py
|
Python
|
src/features/cross_v1.py
|
yota-p/kaggle_jane-street-market-prediction
|
c39d65652d3ce531e99efbc50926cd95cba8b466
|
[
"MIT"
] | null | null | null |
src/features/cross_v1.py
|
yota-p/kaggle_jane-street-market-prediction
|
c39d65652d3ce531e99efbc50926cd95cba8b466
|
[
"MIT"
] | 8
|
2020-12-12T10:06:14.000Z
|
2020-12-30T07:09:52.000Z
|
src/features/cross_v1.py
|
yota-p/kaggle_jane-street-market-prediction
|
c39d65652d3ce531e99efbc50926cd95cba8b466
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import pandas as pd
from src.util.get_environment import get_datadir
def main():
'''
Function:
        - Reduce size of train.csv from 2.5GB to 600MB (in memory)
Note: Files except train.csv aren't reduced (small enough)
- Cut weight <=0
- Calculate action
- Create crossed features, such as:
- cross_41_42_43 = feature_41 + feature_42 + feature_43
- cross_1_2 = feature_1 / feature_2
Input:
- basic_v1/train.pkl
Output:
- cross_v1/train.pkl
'''
DATASET = 'cross_v1'
DATA_DIR = get_datadir()
IN_DIR = f'{DATA_DIR}/processed/basic_v1'
OUT_DIR = f'{DATA_DIR}/processed/{DATASET}'
Path(OUT_DIR).mkdir(exist_ok=True, parents=True)
df = pd.read_pickle(f'{IN_DIR}/train.pkl')
# feature engineering
df['cross_41_42_43'] = df['feature_41'] + df['feature_42'] + df['feature_43']
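    # the small epsilon keeps the ratio finite when feature_2 is zero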
df['cross_1_2'] = df['feature_1'] / (df['feature_2'] + 1e-5)
df = df[['cross_41_42_43', 'cross_1_2']]
print(f'Created dataset {DATASET}')
print(f'Columns: {df.columns}')
df.to_pickle(f'{OUT_DIR}/train.pkl')
if __name__ == '__main__':
main()
| 27.571429
| 81
| 0.645941
|
97a83783f6f5f52107b773090b4e11f1497e6b82
| 1,746
|
py
|
Python
|
cieloApi3/payment.py
|
naripok/API-3.0-Python
|
3bdd27d321a03f4c761876f7907588a0f4726c70
|
[
"MIT"
] | null | null | null |
cieloApi3/payment.py
|
naripok/API-3.0-Python
|
3bdd27d321a03f4c761876f7907588a0f4726c70
|
[
"MIT"
] | null | null | null |
cieloApi3/payment.py
|
naripok/API-3.0-Python
|
3bdd27d321a03f4c761876f7907588a0f4726c70
|
[
"MIT"
] | null | null | null |
from .objectJSON import ObjectJSON
PAYMENTTYPE_CREDITCARD = "CreditCard"
PAYMENTTYPE_DEBITCARD = "DebitCard"
PAYMENTTYPE_ELECTRONIC_TRANSFER = "ElectronicTransfer"
PAYMENTTYPE_BOLETO = "Boleto"
PROVIDER_BRADESCO = "Bradesco"
PROVIDER_BANCO_DO_BRASIL = "BancoDoBrasil"
PROVIDER_SIMULADO = "Simulado"
class Payment(ObjectJSON):
def __init__(self, amount, installments=1, credit_card=None):
self.amount = amount
self.service_tax_amount = None
self.installments = installments
self.interest = None
self.capture = None
self.authenticate = None
self.recurrent = None
self.recurrent_payment = None
self.credit_card = credit_card
self.proof_of_sale = None
self.authorization_code = None
self.soft_descriptor = None
self.return_url = None
self.provider = None
self.payment_id = None
self.tid = None
self.type = None
self.received_date = None
self.captured_amount = None
self.captured_date = None
self.currency = None
self.country = None
self.return_code = None
self.return_message = None
self.status = None
self.links = None
self.extra_data_collection = None
self.expiration_date = None
self.url = None
self.number = None
self.bar_code_number = None
self.digitable_line = None
self.address = None
#Boleto
self.boleto_number = None
self.assignor = None
self.demonstrative = None
self.identification = None
self.instructions = None
def prepare(self):
if self.credit_card:
self.type = PAYMENTTYPE_CREDITCARD
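# A minimal usage sketch; the amount is a placeholder expressed in cents (the usual
# convention for this API), and no CreditCard object is attached here, so prepare()
# leaves the payment type unset.
if __name__ == '__main__':
    payment = Payment(amount=15700, installments=1)
    payment.soft_descriptor = 'MyStore'
    payment.prepare()
    print(payment.type)  # None, because no credit_card was provided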
| 27.714286
| 65
| 0.64433
|
77e5aa7f2fffd75c275be5a8d0db8fb86b29f672
| 4,848
|
py
|
Python
|
configs/vfnet/vfnet_RepVGG_4cls.py
|
HAOCHENYE/yehc_mmdet
|
491cc13c6ff769996b7a23b871b10f9a5a1c56fa
|
[
"Apache-2.0"
] | 1
|
2021-12-25T13:22:39.000Z
|
2021-12-25T13:22:39.000Z
|
configs/vfnet/vfnet_RepVGG_4cls.py
|
HAOCHENYE/yehc_mmdet
|
491cc13c6ff769996b7a23b871b10f9a5a1c56fa
|
[
"Apache-2.0"
] | null | null | null |
configs/vfnet/vfnet_RepVGG_4cls.py
|
HAOCHENYE/yehc_mmdet
|
491cc13c6ff769996b7a23b871b10f9a5a1c56fa
|
[
"Apache-2.0"
] | 1
|
2021-02-01T13:33:26.000Z
|
2021-02-01T13:33:26.000Z
|
dataset_type = 'CocoDataset'
data_root = '/media/traindata/coco/'
base_lr = 0.32
warmup_iters = 2000
model = dict(
type='GFL',
backbone=dict(
type='RepVGGNet',
stem_channels=64,
stage_channels=(32, 64, 72, 96, 128, 192),
block_per_stage=(1, 3, 6, 8, 6, 6),
),
neck=dict(
type='YeFPN',
in_channels=[64, 72, 96, 128, 192],
out_channels=64,
conv_cfg=dict(type="NormalConv",
info={"norm_cfg": None})),
bbox_head=dict(
type='VFNetDeployPrivateHead',
norm_cfg=dict(type='BN', requires_grad=True),
num_classes=4,
in_channels=64,
stacked_convs=2,
feat_channels=64,
strides=[8, 16, 32, 64, 128],
center_sampling=False,
dcn_on_last_conv=False,
use_atss=True,
use_vfl=True,
# bbox_coder=dict(_delete_=True, type='TBLRBBoxCoder', normalizer=4.0),
loss_cls=dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
loss_weight=1.0),
loss_bbox=dict(type='CIoULoss', loss_weight=1.5),
loss_bbox_refine=dict(type='CIoULoss', loss_weight=2.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100)
train_pipline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 960)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomRadiusBlur', prob=0.3, radius=5, std=0),
dict(type='PhotoMetricDistortion', brightness_delta=48),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[127.5, 127.5, 127.5],
std=[128, 128, 128],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
val_pipline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[127.5, 127.5, 127.5],
std=[128, 128, 128],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=36,
workers_per_gpu=4,
train=dict(
type='CocoDataset',
ann_file=data_root + "coco_half_person_81_train.json",
img_prefix=data_root + 'train2017/images',
classes=['person', 'bottle', 'chair', 'potted plant'],
pipeline=train_pipline),
val=dict(
type='CocoDataset',
ann_file=data_root + "coco_half_person_81_val.json",
img_prefix=data_root + 'val2017/images',
classes=['person', 'bottle', 'chair', 'potted plant'],
pipeline=val_pipline),
test=dict(
type='CocoDataset',
ann_file=data_root + "coco_half_person_81_val.json",
img_prefix=data_root + 'val2017/images',
classes=['person', 'bottle', 'chair', 'potted plant'],
pipeline=val_pipline))
evaluation = dict(interval=2, metric='bbox', classwise=True)
optimizer = dict(type='AdamW', lr=0.001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2000,
warmup_ratio=0.01,
step=[90, 110])
# learning policy
total_epochs = 120
checkpoint_config = dict(interval=1)
log_config = dict(
interval=20,
hooks=[dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')])
# custom_hooks = [dict(type="EMAHook", momentum=0.1, interval=2, warm_up=warmup_iters, resume_from=None, priority='HIGHEST')]
device_ids = range(0, 2)
dist_params = dict(backend='nccl')
log_level = 'INFO'
# work_dir = 'work_dirs/paa_atss_OSACSP_pafpn_private_SGD_lr0.32_cosine_ema'
work_dir = 'work_dirs/vfnet_RepVGG_4cls_81cls/'
load_from = None
resume_from = None
# resume_from = None
workflow = [('train', 1)]
gpu_ids = range(0, 2)
| 31.686275
| 125
| 0.570132
|
64e29fc6067a09faca5c76d319f97d043997ebdc
| 84
|
py
|
Python
|
yearmonth/__init__.py
|
bsnacks000/yearmonth
|
c6a6084931e6cc4696de5f8a7f8e48ceca83b944
|
[
"MIT"
] | null | null | null |
yearmonth/__init__.py
|
bsnacks000/yearmonth
|
c6a6084931e6cc4696de5f8a7f8e48ceca83b944
|
[
"MIT"
] | null | null | null |
yearmonth/__init__.py
|
bsnacks000/yearmonth
|
c6a6084931e6cc4696de5f8a7f8e48ceca83b944
|
[
"MIT"
] | null | null | null |
from ._version import VERSION
__version__=VERSION
from .yearmonth import YearMonth
| 16.8
| 32
| 0.845238
|
cec548d995aa2082b5ea273d61b0dec02ca439ef
| 12,882
|
py
|
Python
|
tests/testlinkaliasapi.py
|
rjw57/trafficdb
|
7c895e14a52c8c313981243e36732a5e8dcc909a
|
[
"MIT"
] | 1
|
2016-12-12T21:23:26.000Z
|
2016-12-12T21:23:26.000Z
|
tests/testlinkaliasapi.py
|
rjw57/trafficdb
|
7c895e14a52c8c313981243e36732a5e8dcc909a
|
[
"MIT"
] | null | null | null |
tests/testlinkaliasapi.py
|
rjw57/trafficdb
|
7c895e14a52c8c313981243e36732a5e8dcc909a
|
[
"MIT"
] | null | null | null |
import datetime
import json
import logging
import random
from sqlalchemy import func
from trafficdb.blueprint.api import PAGE_LIMIT
from trafficdb.models import *
from .fixtures import (
create_fake_link_aliases,
create_fake_links,
create_fake_observations,
)
from .util import ApiTestCase as TestCase, API_PREFIX, strip_url
log = logging.getLogger(__name__)
class TestLinkAliases(TestCase):
@classmethod
def create_fixtures(cls):
create_fake_links(link_count=100)
create_fake_link_aliases(alias_count=200)
def test_all_link_aliass(self):
log.info('Querying all link aliases')
n_aliases = 0
n_pages = 0
# Response should look like a JSON document of the following form:
# {
# "aliases": [
# {
# id: <string>,
# linkId: <string>,
# linkUrl: <url>,
# }
# ],
# "page": {
# "count": <number>,
# ?"next": <url>,
# },
# }
# Get all data one page at a time
url = API_PREFIX + '/aliases/'
while url is not None:
# Check we're not looping "forever"
assert n_pages < 20
log.info('GET {0}'.format(url))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIsNot(response.json, None)
self.assertIn('aliases', response.json)
self.assertIn('page', response.json)
aliases = response.json['aliases']
page = response.json['page']
log.info('Got {0} aliases'.format(len(aliases)))
log.info('Page structure: {0}'.format(page))
# Check each alias
for v in aliases:
self.assertIn('id', v)
self.assertIn('linkId', v)
self.assertIn('linkUrl', v)
n_aliases += len(aliases)
self.assertTrue(page['count'] == len(aliases))
n_pages += 1
if 'next' in page:
url = strip_url(page['next'])
else:
url = None
log.info('Got information on {0} alias(es)'.format(n_aliases))
self.assertEqual(n_aliases, 200)
def test_redirect(self):
# Non-canonical links URL should re-direct
url = API_PREFIX + '/aliases'
log.info('GET {0}'.format(url))
response = self.client.get(url)
self.assertEqual(response.status_code, 301)
def test_empty_document(self):
log.info('Querying page beyond alias maximum')
# rationale: using "Z" should be "above" any
# random link alias in the db given the ordering used by Postgres.
response = self.get_link_aliases(from_='Z')
page, aliases = self.parse_link_aliases_response(response)
self.assertEqual(len(aliases), 0)
self.assertNotIn('next', page)
def test_integer_from(self):
# In the case of aliases, names can be just about anything and
# so they could be an integer.
log.info('Querying page with integer from')
response = self.get_link_aliases(from_=0)
page, aliases = self.parse_link_aliases_response(response)
def test_negative_count(self):
request_count = -3
log.info('Querying {0} aliases'.format(request_count))
response = self.get_link_aliases(count=request_count)
# -ve counts should return bad request
self.assertEqual(response.status_code, 400)
def test_non_number_count(self):
request_count = 'one'
log.info('Querying {0} aliases'.format(request_count))
response = self.get_link_aliases(count=request_count)
# non-numeric counts should return bad request
self.assertEqual(response.status_code, 400)
def test_small_counts(self):
request_count = max(1,PAGE_LIMIT >> 1)
assert PAGE_LIMIT > request_count
log.info('Querying {0} aliases'.format(request_count))
response = self.get_link_aliases(count=request_count)
page, aliases = self.parse_link_aliases_response(response)
self.assertEqual(len(aliases), request_count)
self.assertEqual(len(aliases), page['count'])
def test_huge_counts(self):
log.info('Querying 100 aliases (should be truncated)')
request_count = PAGE_LIMIT * 4
log.info('Querying {0} aliases'.format(request_count))
response = self.get_link_aliases(count=request_count)
page, aliases = self.parse_link_aliases_response(response)
self.assertEqual(len(aliases), page['count'])
self.assertTrue(len(aliases) == PAGE_LIMIT)
def test_non_json_resolve_body(self):
response = self.client.post(API_PREFIX + '/aliases/resolve',
data='not a json document', content_type='application/json')
self.assert_400(response)
def test_empty_json_resolve_body(self):
response = self.client.post(API_PREFIX + '/aliases/resolve',
data='{}', content_type='application/json')
self.assert_400(response)
def test_bad_alias_list_resolve_body_1(self):
response = self.client.post(API_PREFIX + '/aliases/resolve',
data='{"aliases": 3}', content_type='application/json')
self.assert_400(response)
    def test_bad_alias_list_resolve_body_2(self):
response = self.client.post(API_PREFIX + '/aliases/resolve',
data='{"aliases": ["one", 3]}', content_type='application/json')
self.assert_400(response)
def test_bad_content_type_resolve_body(self):
response = self.client.post(API_PREFIX + '/aliases/resolve',
data='{"aliases": []}', content_type='text/plain')
self.assert_400(response)
def test_empty_resolve(self):
response = self.make_resolve_link_aliases_request([])
self.assert_200(response)
self.assertIn('resolutions', response.json)
resolutions = response.json['resolutions']
self.assertEqual(len(resolutions), 0)
def gen_alias_names(self, good_count=3, bad_count=3):
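        # Mix real alias names from the database with fabricated ones and return
        # a dict mapping each queried name to whether it actually exists.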
good_alias_names = set(r[0] for r in db.session.query(LinkAlias.name))
bad_alias_names = set('_bad_alias_{0}'.format(x) for x in range(bad_count))
alias_names = random.sample(good_alias_names, good_count) + list(bad_alias_names)
random.shuffle(alias_names)
return dict((n, n in good_alias_names) for n in alias_names)
def test_simple_resolve(self):
alias_name_map = self.gen_alias_names()
query_names = list(alias_name_map.keys())
log.info('Querying aliases: {0}'.format(query_names))
response = self.make_resolve_link_aliases_request(query_names)
self.assert_200(response)
self.assertIn('resolutions', response.json)
resolutions = response.json['resolutions']
log.info('Resolutions: {0}'.format(resolutions))
self.assertEqual(len(resolutions), len(query_names))
for name, res in zip(query_names, resolutions):
res_name, res_link = res
self.assertEqual(name, res_name)
if alias_name_map[name]:
# good link
self.assertIsNotNone(res_link)
self.assertIn('id', res_link)
self.assertIn('url', res_link)
else:
# bad link
self.assertIsNone(res_link)
def test_single_good_resolve(self):
alias_name_map = self.gen_alias_names(good_count=1, bad_count=0)
query_names = list(alias_name_map.keys())
log.info('Querying aliases: {0}'.format(query_names))
response = self.make_resolve_link_aliases_request(query_names)
self.assert_200(response)
self.assertIn('resolutions', response.json)
resolutions = response.json['resolutions']
log.info('Resolutions: {0}'.format(resolutions))
self.assertEqual(len(resolutions), len(query_names))
for name, res in zip(query_names, resolutions):
res_name, res_link = res
self.assertEqual(name, res_name)
if alias_name_map[name]:
# good link
self.assertIsNotNone(res_link)
self.assertIn('id', res_link)
self.assertIn('url', res_link)
else:
# bad link
self.assertIsNone(res_link)
def test_single_bad_resolve(self):
alias_name_map = self.gen_alias_names(good_count=0, bad_count=1)
query_names = list(alias_name_map.keys())
log.info('Querying aliases: {0}'.format(query_names))
response = self.make_resolve_link_aliases_request(query_names)
self.assert_200(response)
self.assertIn('resolutions', response.json)
resolutions = response.json['resolutions']
log.info('Resolutions: {0}'.format(resolutions))
self.assertEqual(len(resolutions), len(query_names))
for name, res in zip(query_names, resolutions):
res_name, res_link = res
self.assertEqual(name, res_name)
if alias_name_map[name]:
# good link
self.assertIsNotNone(res_link)
self.assertIn('id', res_link)
self.assertIn('url', res_link)
else:
# bad link
self.assertIsNone(res_link)
def test_too_big_resolve(self):
alias_name_map = self.gen_alias_names(good_count=PAGE_LIMIT, bad_count=PAGE_LIMIT)
query_names = list(alias_name_map.keys())
log.info('Querying aliases: {0}'.format(query_names))
response = self.make_resolve_link_aliases_request(query_names)
self.assert_400(response)
ALIASES_PATH = API_PREFIX + '/aliases/'
class TestMutation(TestCase):
@classmethod
def create_fixtures(cls):
create_fake_links(link_count=20)
def new_alias_request(self, link_data):
return self.client.patch(ALIASES_PATH,
data=json.dumps(link_data),
content_type='application/json')
def test_empty_body_request(self):
response = self.client.patch(ALIASES_PATH, data='', content_type='application/json')
self.assert_400(response)
def test_non_json_body_request(self):
response = self.client.patch(ALIASES_PATH, data='not json', content_type='application/json')
self.assert_400(response)
def test_no_content_type_body_request(self):
response = self.client.patch(ALIASES_PATH, data='{}')
self.assert_400(response)
def test_empty_request(self):
response = self.new_alias_request({})
self.assert_200(response)
def verify_create(self, create, response):
self.assert_200(response)
self.assertIn('create', response.json)
create_resp = response.json['create']
self.assertEqual(create_resp['count'], len(create))
# Verify by resolving
response = self.make_resolve_link_aliases_request(
list(cr['name'] for cr in create)
)
self.assert_200(response)
self.assertIn('resolutions', response.json)
resolutions = response.json['resolutions']
self.assertEqual(len(resolutions), len(create))
# What do we expect?
expected = {}
for cr in create:
expected[cr['name']] = cr['link']
log.info('resolutions: {0}'.format(resolutions))
log.info('expected: {0}'.format(expected))
for r_n, r_l in resolutions:
self.assertIn(r_n, expected)
self.assertEqual(r_l['id'], expected[r_n])
def test_create_single(self):
create = [
dict(name='new-alias', link=self.get_some_link_id()),
]
log.info('Sending create request: {0}'.format(create))
response = self.new_alias_request(dict(create=create))
self.verify_create(create, response)
def test_create_multiple_identical(self):
create = [
dict(name='new-alias-1', link=self.get_some_link_id()),
dict(name='new-alias-1', link=self.get_some_link_id()),
dict(name='new-alias-1', link=self.get_some_link_id()),
]
log.info('Sending create request: {0}'.format(create))
response = self.new_alias_request(dict(create=create))
self.assert_400(response)
def test_create_multiple(self):
create = [
dict(name='new-alias-1', link=self.get_some_link_id()),
dict(name='new-alias-2', link=self.get_some_link_id()),
dict(name='new-alias-3', link=self.get_some_link_id()),
]
log.info('Sending create request: {0}'.format(create))
response = self.new_alias_request(dict(create=create))
self.verify_create(create, response)
| 38.684685
| 100
| 0.627077
|
7c6dac2314617e6e594af5f95ddf3f69ec127758
| 968
|
py
|
Python
|
sols/1013.py
|
Paul11100/LeetCode
|
9896c579dff1812c0c76964db8d60603ee715e35
|
[
"MIT"
] | null | null | null |
sols/1013.py
|
Paul11100/LeetCode
|
9896c579dff1812c0c76964db8d60603ee715e35
|
[
"MIT"
] | null | null | null |
sols/1013.py
|
Paul11100/LeetCode
|
9896c579dff1812c0c76964db8d60603ee715e35
|
[
"MIT"
] | null | null | null |
class Solution:
# Greedy Two Pointers (Accepted), O(n) time, O(1) space
def canThreePartsEqualSum(self, arr: List[int]) -> bool:
if len(arr) < 3:
return False
total = sum(arr)
target = total / 3
a, b = 0, len(arr)-1
a_tot = b_tot = 0
while a_tot != target or a == 0:
if a >= len(arr):
return False
a_tot += arr[a]
a += 1
while b_tot != target or b == len(arr)-1:
if b < 0:
return False
b_tot += arr[b]
b -= 1
return a <= b
# Count Valid Parts (Top Voted), O(n) time, O(1) space
def canThreePartsEqualSum(self, A: List[int]) -> bool:
average, remainder, part, cnt = sum(A) // 3, sum(A) % 3, 0, 0
for a in A:
part += a
if part == average:
cnt += 1
part = 0
return not remainder and cnt >= 3
| 29.333333
| 69
| 0.452479
|
240843929a62873561804b718522912cbc4c751b
| 1,279
|
py
|
Python
|
core/src/Test/TestConfig.py
|
mkg20001/Fuzium
|
d424cd42a92272563fcba2290028c036cb7ce4a1
|
[
"MIT"
] | null | null | null |
core/src/Test/TestConfig.py
|
mkg20001/Fuzium
|
d424cd42a92272563fcba2290028c036cb7ce4a1
|
[
"MIT"
] | null | null | null |
core/src/Test/TestConfig.py
|
mkg20001/Fuzium
|
d424cd42a92272563fcba2290028c036cb7ce4a1
|
[
"MIT"
] | null | null | null |
import pytest
import Config
@pytest.mark.usefixtures("resetSettings")
class TestConfig:
def testParse(self):
# Defaults
config_test = Config.Config("zeronet.py".split(" "))
config_test.parse(silent=True, parse_config=False)
assert not config_test.debug
assert not config_test.debug_socket
# Test parse command line with unknown parameters (ui_password)
config_test = Config.Config("zeronet.py --debug --debug_socket --ui_password hello".split(" "))
config_test.parse(silent=True, parse_config=False)
assert config_test.debug
assert config_test.debug_socket
with pytest.raises(AttributeError):
config_test.ui_password
# More complex test
args = "zeronet.py --unknown_arg --debug --debug_socket --ui_restrict 127.0.0.1 1.2.3.4 "
args += "--another_unknown argument --use_openssl False siteSign address privatekey --inner_path users/content.json"
config_test = Config.Config(args.split(" "))
config_test.parse(silent=True, parse_config=False)
assert config_test.debug
assert "1.2.3.4" in config_test.ui_restrict
assert not config_test.use_openssl
assert config_test.inner_path == "users/content.json"
| 39.96875
| 124
| 0.686474
|
3dcfde01bf48050ced4e8088a4d8780e0d3970f0
| 16,645
|
py
|
Python
|
sdk/tables/azure-data-tables/tests/_shared/testcase.py
|
KarishmaGhiya/azure-sdk-for-python
|
1216acf1caa13575d3b8cfa0b401e42eefa9f17f
|
[
"MIT"
] | 1
|
2020-08-17T14:40:09.000Z
|
2020-08-17T14:40:09.000Z
|
sdk/tables/azure-data-tables/tests/_shared/testcase.py
|
KarishmaGhiya/azure-sdk-for-python
|
1216acf1caa13575d3b8cfa0b401e42eefa9f17f
|
[
"MIT"
] | null | null | null |
sdk/tables/azure-data-tables/tests/_shared/testcase.py
|
KarishmaGhiya/azure-sdk-for-python
|
1216acf1caa13575d3b8cfa0b401e42eefa9f17f
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from __future__ import division
from contextlib import contextmanager
import copy
import inspect
import os
import os.path
import time
from datetime import datetime, timedelta
from azure.data.tables import ResourceTypes, AccountSasPermissions
from azure.data.tables._table_shared_access_signature import generate_account_sas
try:
import unittest.mock as mock
except ImportError:
import mock
import zlib
import math
import sys
import string
import random
import re
import logging
from devtools_testutils import (
AzureMgmtTestCase,
AzureMgmtPreparer,
ResourceGroupPreparer,
StorageAccountPreparer,
FakeResource,
)
from azure_devtools.scenario_tests import RecordingProcessor, AzureTestError, create_random_name
try:
from cStringIO import StringIO # Python 2
except ImportError:
from io import StringIO
from azure.core.credentials import AccessToken
#from azure.data.tabless import generate_account_sas, AccountSasPermissions, ResourceTypes
from azure.mgmt.storage.models import StorageAccount, Endpoints
try:
from devtools_testutils import mgmt_settings_real as settings
except ImportError:
from devtools_testutils import mgmt_settings_fake as settings
import pytest
LOGGING_FORMAT = '%(asctime)s %(name)-20s %(levelname)-5s %(message)s'
class FakeTokenCredential(object):
"""Protocol for classes able to provide OAuth tokens.
:param str scopes: Lets you specify the type of access needed.
"""
def __init__(self):
self.token = AccessToken("YOU SHALL NOT PASS", 0)
def get_token(self, *args):
return self.token
class XMSRequestIDBody(RecordingProcessor):
"""This process is used for Storage batch call only, to avoid the echo policy.
"""
def process_response(self, response):
content_type = None
for key, value in response.get('headers', {}).items():
if key.lower() == 'content-type':
content_type = (value[0] if isinstance(value, list) else value).lower()
break
if content_type and 'multipart/mixed' in content_type:
response['body']['string'] = re.sub(b"x-ms-client-request-id: [a-f0-9-]+\r\n", b"", response['body']['string'])
return response
class GlobalStorageAccountPreparer(AzureMgmtPreparer):
def __init__(self):
super(GlobalStorageAccountPreparer, self).__init__(
name_prefix='',
random_name_length=42
)
def create_resource(self, name, **kwargs):
storage_account = TableTestCase._STORAGE_ACCOUNT
if self.is_live:
self.test_class_instance.scrubber.register_name_pair(
storage_account.name,
"storagename"
)
else:
name = "storagename"
storage_account.name = name
storage_account.primary_endpoints.table = 'https://{}.{}.core.windows.net'.format(name, 'table')
return {
'location': 'westus',
'resource_group': TableTestCase._RESOURCE_GROUP,
'storage_account': storage_account,
'storage_account_key': TableTestCase._STORAGE_KEY,
'storage_account_cs': TableTestCase._STORAGE_CONNECTION_STRING,
}
class GlobalResourceGroupPreparer(AzureMgmtPreparer):
def __init__(self):
super(GlobalResourceGroupPreparer, self).__init__(
name_prefix='',
random_name_length=42
)
def create_resource(self, name, **kwargs):
rg = TableTestCase._RESOURCE_GROUP
if self.is_live:
self.test_class_instance.scrubber.register_name_pair(
rg.name,
"rgname"
)
else:
rg = FakeResource(
name="rgname",
id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rgname"
)
return {
'location': 'westus',
'resource_group': rg,
}
class TableTestCase(AzureMgmtTestCase):
def __init__(self, *args, **kwargs):
super(TableTestCase, self).__init__(*args, **kwargs)
self.replay_processors.append(XMSRequestIDBody())
def connection_string(self, account, key):
return "DefaultEndpointsProtocol=https;AccountName=" + account.name + ";AccountKey=" + str(key) + ";EndpointSuffix=core.windows.net"
def account_url(self, account, endpoint_type):
"""Return an url of storage account.
        :param str account: Storage account name
        :param str endpoint_type: The service part of the URL; should be "table" or "cosmos".
"""
try:
if endpoint_type == "table":
return account.primary_endpoints.table.rstrip("/")
if endpoint_type == "cosmos":
return "https://{}.table.cosmos.azure.com".format(account.name)
else:
raise ValueError("Unknown storage type {}".format(storage_type))
except AttributeError: # Didn't find "primary_endpoints"
return 'https://{}.{}.core.windows.net'.format(account, endpoint_type)
def configure_logging(self):
try:
enable_logging = self.get_settings_value("ENABLE_LOGGING")
except AzureTestError:
enable_logging = True # That's the default value in fake settings
self.enable_logging() if enable_logging else self.disable_logging()
def enable_logging(self):
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(LOGGING_FORMAT))
self.logger.handlers = [handler]
self.logger.setLevel(logging.INFO)
self.logger.propagate = True
self.logger.disabled = False
def disable_logging(self):
self.logger.propagate = False
self.logger.disabled = True
self.logger.handlers = []
def sleep(self, seconds):
if self.is_live:
time.sleep(seconds)
def get_random_bytes(self, size):
# recordings don't like random stuff. making this more
# deterministic.
return b'a'*size
def get_random_text_data(self, size):
'''Returns random unicode text data exceeding the size threshold for
chunking blob upload.'''
checksum = zlib.adler32(self.qualified_test_name.encode()) & 0xffffffff
rand = random.Random(checksum)
text = u''
words = [u'hello', u'world', u'python', u'啊齄丂狛狜']
while (len(text) < size):
index = int(rand.random()*(len(words) - 1))
text = text + u' ' + words[index]
return text
@staticmethod
def _set_test_proxy(service, settings):
if settings.USE_PROXY:
service.set_proxy(
settings.PROXY_HOST,
settings.PROXY_PORT,
settings.PROXY_USER,
settings.PROXY_PASSWORD,
)
def assertNamedItemInContainer(self, container, item_name, msg=None):
def _is_string(obj):
if sys.version_info >= (3,):
return isinstance(obj, str)
else:
return isinstance(obj, basestring)
for item in container:
if _is_string(item):
if item == item_name:
return
elif item.name == item_name:
return
elif hasattr(item, 'snapshot') and item.snapshot == item_name:
return
standardMsg = '{0} not found in {1}'.format(
repr(item_name), [str(c) for c in container])
self.fail(self._formatMessage(msg, standardMsg))
def assertNamedItemNotInContainer(self, container, item_name, msg=None):
for item in container:
if item.name == item_name:
standardMsg = '{0} unexpectedly found in {1}'.format(
repr(item_name), repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def generate_oauth_token(self):
if self.is_live:
from azure.identity import ClientSecretCredential
return ClientSecretCredential(
self.get_settings_value("TENANT_ID"),
self.get_settings_value("CLIENT_ID"),
self.get_settings_value("CLIENT_SECRET"),
)
return self.generate_fake_token()
def generate_sas_token(self):
fake_key = 'a'*30 + 'b'*30
return '?' + generate_account_sas(
account_name = 'test', # name of the storage account
account_key = fake_key, # key for the storage account
resource_types = ResourceTypes(object=True),
permission = AccountSasPermissions(read=True,list=True),
start = datetime.now() - timedelta(hours = 24),
expiry = datetime.now() + timedelta(days = 8)
)
def generate_fake_token(self):
return FakeTokenCredential()
def not_for_emulator(test):
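    # Currently a pass-through decorator: the wrapper runs the test unconditionally
    # and no emulator detection is performed here.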
def skip_test_if_targeting_emulator(self):
test(self)
return skip_test_if_targeting_emulator
class RetryCounter(object):
def __init__(self):
self.count = 0
def simple_count(self, retry_context):
self.count += 1
class ResponseCallback(object):
def __init__(self, status=None, new_status=None):
self.status = status
self.new_status = new_status
self.first = True
self.count = 0
def override_first_status(self, response):
if self.first and response.http_response.status_code == self.status:
response.http_response.status_code = self.new_status
self.first = False
self.count += 1
def override_status(self, response):
if response.http_response.status_code == self.status:
response.http_response.status_code = self.new_status
self.count += 1
class LogCaptured(object):
def __init__(self, test_case=None):
# accept the test case so that we may reset logging after capturing logs
self.test_case = test_case
def __enter__(self):
# enable logging
# it is possible that the global logging flag is turned off
self.test_case.enable_logging()
# create a string stream to send the logs to
self.log_stream = StringIO()
# the handler needs to be stored so that we can remove it later
self.handler = logging.StreamHandler(self.log_stream)
self.handler.setFormatter(logging.Formatter(LOGGING_FORMAT))
# get and enable the logger to send the outputs to the string stream
self.logger = logging.getLogger('azure.storage')
self.logger.level = logging.DEBUG
self.logger.addHandler(self.handler)
# the stream is returned to the user so that the capture logs can be retrieved
return self.log_stream
def __exit__(self, exc_type, exc_val, exc_tb):
# stop the handler, and close the stream to exit
self.logger.removeHandler(self.handler)
self.log_stream.close()
# reset logging since we messed with the setting
self.test_case.configure_logging()
@pytest.fixture(scope="session")
def storage_account():
test_case = AzureMgmtTestCase("__init__")
rg_preparer = ResourceGroupPreparer(random_name_enabled=True, name_prefix='pystorage')
storage_preparer = StorageAccountPreparer(random_name_enabled=True, name_prefix='pyacrstorage')
# Create
subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID", None)
location = os.environ.get("AZURE_LOCATION", "westus")
existing_rg_name = os.environ.get("AZURE_RESOURCEGROUP_NAME")
existing_storage_name = os.environ.get("AZURE_STORAGE_ACCOUNT_NAME")
existing_storage_key = os.environ.get("AZURE_STORAGE_ACCOUNT_KEY")
storage_connection_string = os.environ.get("AZURE_STORAGE_CONNECTION_STRING")
i_need_to_create_rg = not (existing_rg_name or existing_storage_name or storage_connection_string)
got_storage_info_from_env = existing_storage_name or storage_connection_string
try:
if i_need_to_create_rg:
rg_name, rg_kwargs = rg_preparer._prepare_create_resource(test_case)
rg = rg_kwargs['resource_group']
else:
rg_name = existing_rg_name or "no_rg_needed"
rg = FakeResource(
name=rg_name,
id="/subscriptions/{}/resourceGroups/{}".format(subscription_id, rg_name)
)
TableTestCase._RESOURCE_GROUP = rg
try:
if got_storage_info_from_env:
if storage_connection_string:
storage_connection_string_parts = dict([
part.split('=', 1)
for part in storage_connection_string.split(";")
])
storage_account = None
if existing_storage_name:
storage_name = existing_storage_name
storage_account = StorageAccount(
location=location,
)
storage_account.name = storage_name
storage_account.id = storage_name
storage_account.primary_endpoints=Endpoints()
storage_account.primary_endpoints.table = 'https://{}.{}.core.windows.net'.format(storage_name, 'table')
storage_key = existing_storage_key
if not storage_connection_string:
# It means I have received a storage name from env
storage_connection_string=";".join([
"DefaultEndpointsProtocol=https",
"AccountName={}".format(storage_name),
"AccountKey={}".format(storage_key),
"TableEndpoint={}".format(storage_account.primary_endpoints.table),
])
if not storage_account:
# It means I have received a connection string
storage_name = storage_connection_string_parts["AccountName"]
storage_account = StorageAccount(
location=location,
)
def build_service_endpoint(service):
try:
suffix = storage_connection_string_parts["EndpointSuffix"]
except KeyError:
suffix = "cosmos.azure.com"
return "{}://{}.{}.{}".format(
storage_connection_string_parts.get("DefaultEndpointsProtocol", "https"),
storage_connection_string_parts["AccountName"],
service,
suffix
)
storage_account.name = storage_name
storage_account.id = storage_name
storage_account.primary_endpoints=Endpoints()
storage_account.primary_endpoints.table = storage_connection_string_parts.get("TableEndpoint", build_service_endpoint("table"))
storage_account.secondary_endpoints=Endpoints()
storage_account.secondary_endpoints.table = storage_connection_string_parts.get("TableSecondaryEndpoint", build_service_endpoint("table"))
storage_key = storage_connection_string_parts["AccountKey"]
else:
storage_name, storage_kwargs = storage_preparer._prepare_create_resource(test_case, **rg_kwargs)
storage_account = storage_kwargs['storage_account']
storage_key = storage_kwargs['storage_account_key']
storage_connection_string = storage_kwargs['storage_account_cs']
TableTestCase._STORAGE_ACCOUNT = storage_account
TableTestCase._STORAGE_KEY = storage_key
TableTestCase._STORAGE_CONNECTION_STRING = storage_connection_string
yield
finally:
if not got_storage_info_from_env:
storage_preparer.remove_resource(
storage_name,
resource_group=rg
)
finally:
if i_need_to_create_rg:
rg_preparer.remove_resource(rg_name)
TableTestCase._RESOURCE_GROUP = None
| 37.573363
| 158
| 0.623791
|
ca72a3698829de734909500a0adb50ec78e6666b
| 74,705
|
py
|
Python
|
torch/quantization/fx/quantize.py
|
guoyejun/pytorch
|
57cba8e60116bfff37d10bc2b4596d8c478ffd0a
|
[
"Intel"
] | null | null | null |
torch/quantization/fx/quantize.py
|
guoyejun/pytorch
|
57cba8e60116bfff37d10bc2b4596d8c478ffd0a
|
[
"Intel"
] | 1
|
2021-04-12T19:49:08.000Z
|
2021-04-12T19:49:08.000Z
|
torch/quantization/fx/quantize.py
|
shmsong/pytorch
|
90e532f3ef17a9611e9e7a9f1f6189d4168bf084
|
[
"Intel"
] | 1
|
2022-02-23T02:34:50.000Z
|
2022-02-23T02:34:50.000Z
|
import torch
from torch.fx import (
GraphModule,
Proxy,
map_arg
)
from torch.fx.graph import (
Graph,
Node,
)
from torch.fx.node import Argument
from torch.quantization import (
propagate_qconfig_,
convert,
)
from ..quantization_mappings import (
get_default_qat_module_mappings,
)
from ..quantize import (
_remove_qconfig,
is_activation_post_process
)
from ..utils import (
get_combined_dict,
get_qconfig_dtypes,
get_swapped_custom_module_class,
weight_is_quantized,
activation_is_statically_quantized,
activation_is_int8_quantized,
activation_dtype,
weight_dtype,
)
from .pattern_utils import (
is_match,
get_default_quant_patterns,
get_default_output_activation_post_process_map,
input_output_observed,
Pattern,
)
from .graph_module import (
is_observed_module,
is_observed_standalone_module,
ObservedGraphModule,
ObservedStandaloneGraphModule,
QuantizedGraphModule,
)
from .quantization_patterns import (
binary_op_supported_dtypes,
binary_reference_op_supported_dtypes,
BinaryOpQuantizeHandler,
CatQuantizeHandler,
CopyNodeQuantizeHandler,
CustomModuleQuantizeHandler,
DefaultQuantizeHandler,
FixedQParamsOpQuantizeHandler,
QuantizeHandler,
StandaloneModuleQuantizeHandler,
)
from .utils import (
_parent_name,
all_node_args_have_no_tensors,
quantize_node,
get_custom_module_class_keys,
get_new_attr_name_with_prefix,
collect_producer_nodes,
graph_module_from_producer_nodes,
assert_and_get_unique_device,
node_return_type_is_int,
)
from .qconfig_utils import (
convert_dict_to_ordered_dict,
get_flattened_qconfig_dict,
get_object_type_qconfig,
get_qconfig,
QConfigAny,
)
import operator
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
# Define helper types
MatchResult = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler,
QConfigAny]
# ------------------------
# Helper Functions
# ------------------------
def insert_observer(
node: Node, observer: torch.quantization.ObserverBase,
model: torch.nn.Module,
activation_post_process_map: Dict[str, List[str]],
activation_post_process_indexes: Dict[str, int],
env: Dict[Any, Any], observed_graph: Graph, load_arg: Callable,
observed_node_names_set: Set[str],
quants: Dict[str, List[Tuple[DefaultQuantizeHandler, Callable]]]):
"""Insert observer for node by modifying the observed_graph and
attach observer module to the model
Args:
node: Node
observer: observer/fake_quantize module instance
"""
# In eval mode fixed qparams node are the same as CopyNode and we
# won't insert observer for them
if not model.training and isinstance(observer, torch.quantization.FixedQParamsFakeQuantize):
return
# respect device affinity when adding observers
model_device = assert_and_get_unique_device(model)
if model_device:
observer.to(model_device)
# add observer module as attribute
prefix = node.name + '_activation_post_process_'
get_new_observer_name = get_new_attr_name_with_prefix(prefix)
observer_name = get_new_observer_name(model)
setattr(model, observer_name, observer)
# put observer instance activation_post_process map
activation_post_process_map[node.name].append(observer_name)
# initialize index map for activation_post_process
if node.name not in activation_post_process_indexes:
activation_post_process_indexes[node.name] = 0
# insert observer call
env[node.name] = observed_graph.create_node(
'call_module', observer_name, (load_arg(node),), {})
observed_node_names_set.add(node.name)
def maybe_insert_observer_for_special_module(
quantize_handler: QuantizeHandler, modules: Dict[str, torch.nn.Module],
prepare_custom_config_dict: Any, qconfig: Any, node: Node) -> Optional[List[int]]:
""" Insert observer for custom module and standalone module
    Returns: standalone_module_input_idxs: the indexes for inputs that
    need to be observed by the parent module
"""
assert modules is not None
standalone_module_input_idxs = None
if isinstance(quantize_handler, CustomModuleQuantizeHandler):
custom_module = modules[node.target] # type: ignore[index]
custom_module_class_mapping = prepare_custom_config_dict.get(
"float_to_observed_custom_module_class", {})
observed_custom_module_class = \
get_swapped_custom_module_class(
custom_module, custom_module_class_mapping, qconfig)
observed_custom_module = \
observed_custom_module_class.from_float(custom_module)
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, observed_custom_module)
elif isinstance(quantize_handler, StandaloneModuleQuantizeHandler):
# observe standalone module
standalone_module = modules[node.target] # type: ignore[index]
standalone_module_name_configs = prepare_custom_config_dict.get("standalone_module_name", [])
standalone_module_class_configs = prepare_custom_config_dict.get("standalone_module_class", [])
class_config_map = {x[0]: (x[1], x[2]) for x in standalone_module_class_configs}
name_config_map = {x[0]: (x[1], x[2]) for x in standalone_module_name_configs}
config = class_config_map.get(type(standalone_module), (None, None))
config = name_config_map.get(node.target, config)
sm_qconfig_dict = {"": qconfig} if config[0] is None else config[0]
sm_prepare_config_dict = {} if config[1] is None else config[1]
prepare = \
torch.quantization.quantize_fx._prepare_standalone_module_fx # type: ignore[attr-defined]
observed_standalone_module = \
prepare(standalone_module, sm_qconfig_dict, sm_prepare_config_dict)
standalone_module_input_idxs = \
observed_standalone_module._standalone_module_input_quantized_idxs.int().tolist()
observed_standalone_module = ObservedStandaloneGraphModule(
observed_standalone_module, observed_standalone_module.graph)
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name,
observed_standalone_module)
modules[node.target] = observed_standalone_module # type: ignore[index]
return standalone_module_input_idxs
def insert_observer_for_output_of_the_node(
node: Node,
quantize_handler: QuantizeHandler,
qconfig: Any,
modules: Dict[str, torch.nn.Module],
model: torch.nn.Module,
pattern: Any,
activation_post_process_map: Dict[str, List[str]],
activation_post_process_indexes: Dict[str, int],
env: Dict[Any, Any],
observed_graph: Graph,
load_arg: Callable,
observed_node_names_set: Set[str],
matched_nodes: Optional[List[Node]],
standalone_module_input_idxs: Optional[List[int]],
quants: Dict[str, List[Tuple[DefaultQuantizeHandler, Callable]]]):
""" Insert observer/fake_quantize module for output of the observed
module if needed
"""
# don't need to insert observer for output if activation does not
# need to be statically quantized
assert modules is not None
    # TODO: Add warnings in the quantize handlers that do not support fp16 quantization
inserted_observer = False
if activation_is_statically_quantized(qconfig):
if isinstance(quantize_handler, FixedQParamsOpQuantizeHandler) \
and model.training:
# we only insert fake quantize module in qat
assert pattern is not None
if activation_dtype(qconfig) == torch.float16:
activation_post_process_ctr = qconfig.activation
else:
activation_post_process_ctr = \
get_default_output_activation_post_process_map().get(
pattern, None)
assert activation_post_process_ctr is not None, \
"activation_post_process constructor not provided " + \
"for pattern:" + str(pattern)
insert_observer(
node, activation_post_process_ctr(),
model, activation_post_process_map,
activation_post_process_indexes,
env, observed_graph,
load_arg, observed_node_names_set, quants)
inserted_observer = True
elif (isinstance(quantize_handler,
FixedQParamsOpQuantizeHandler) and
not model.training):
# inserting observers for output of observed module, or
# mark the output as observed
assert node.op in [
'call_module',
'call_function',
'call_method'], \
'FixedQParamsQuantizeHandler of type ' + node.op + ' is not handled'
def is_observed(input_arg):
if isinstance(input_arg, Node):
return input_arg.name in observed_node_names_set
elif isinstance(input_arg, list):
return all(map(is_observed, input_arg))
# insert observers for fixedqparams ops like sigmoid, since
# it supports fp16 static quantization
if isinstance(quantize_handler, FixedQParamsOpQuantizeHandler) and \
activation_dtype(qconfig) == torch.float16:
insert_observer(
node, qconfig.activation(),
model, activation_post_process_map,
activation_post_process_indexes,
env, observed_graph,
load_arg, observed_node_names_set, quants)
inserted_observer = True
else:
# propagate observed property from input
if is_observed(node.args[0]):
observed_node_names_set.add(node.name)
inserted_observer = True
elif (isinstance(quantize_handler, BinaryOpQuantizeHandler) and
quantize_handler.num_tensor_args == 1):
assert matched_nodes is not None
input_node = matched_nodes[-1] # first node in the sequence
def input_is_observed(arg):
return (isinstance(arg, Node) and
arg.name in observed_node_names_set)
            # This is checking if one of the arguments of add/mul
            # is an observed node
            # If both of the inputs are numbers,
            # we will not consider the output to be observed
if (input_is_observed(input_node.args[0]) or
input_is_observed(input_node.args[1])):
observed_node_names_set.add(node.name)
inserted_observer = True
if activation_dtype(qconfig) == torch.float16:
# observer for outputs
new_observer = qconfig.activation()
insert_observer(
node, new_observer, model,
activation_post_process_map,
activation_post_process_indexes,
env, observed_graph,
load_arg, observed_node_names_set, quants)
inserted_observer = True
elif isinstance(quantize_handler,
StandaloneModuleQuantizeHandler):
assert node.op == "call_module"
assert isinstance(node.target, str)
sm_out_qidxs = modules[node.target]._standalone_module_output_quantized_idxs.tolist() # type: ignore[operator]
output_is_quantized = 0 in sm_out_qidxs
if output_is_quantized:
observed_node_names_set.add(node.name)
elif (quantize_handler.all_node_args_are_tensors and
input_output_observed(quantize_handler)):
# observer for outputs
new_observer = qconfig.activation()
insert_observer(
node, new_observer, model,
activation_post_process_map,
activation_post_process_indexes,
env, observed_graph,
load_arg, observed_node_names_set, quants)
inserted_observer = True
# insert observer for input of standalone module
if standalone_module_input_idxs is not None:
for idx in standalone_module_input_idxs:
if node.args[idx].name not in observed_node_names_set: # type: ignore[union-attr]
new_observer = qconfig.activation()
insert_observer(
node, new_observer, model,
activation_post_process_map,
activation_post_process_indexes,
env, observed_graph,
load_arg, observed_node_names_set, quants)
inserted_observer = True
    # we already inserted activation_post_process for the output value
# which is the same as the input value of the next op, so we
# can skip inserting one activation_post_process for the input
if node.name in quants and inserted_observer:
quants[node.name].pop(0)
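# Note: the helper below inserts observers for the *inputs* of a node, using
# the pending (quantize handler, observer constructor) pairs recorded in
# `quants`; insert_observers_for_model calls it while copying nodes into the
# observed graph.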
def insert_observer_for_input_arg_of_observed_node(
node: Node, observed_node_names_set: Set[str],
quants: Dict[str, List[Tuple[DefaultQuantizeHandler, Callable]]],
model: torch.nn.Module,
activation_post_process_map: Dict[str, List[str]],
activation_post_process_indexes: Dict[str, int],
env: Dict[str, str], observed_graph: Graph,
load_arg: Callable):
if node.name in quants:
quant_act_ctrs = quants[node.name][:]
for _, activation_post_process_ctr in quant_act_ctrs:
if activation_post_process_ctr is not None:
insert_observer(
node, activation_post_process_ctr(),
model, activation_post_process_map,
activation_post_process_indexes,
env, observed_graph, load_arg, observed_node_names_set, quants)
def insert_observer_for_output_of_model(
node: Node,
model: torch.nn.Module,
qconfig_map: Dict[str, QConfigAny],
activation_post_process_map: Dict[str, List[str]],
activation_post_process_indexes: Dict[str, int],
env: Dict[Any, Any], observed_graph: Graph, load_arg: Callable,
observed_node_names_set: Set[str],
quants: Dict[str, List[Tuple[DefaultQuantizeHandler, Callable]]]):
if isinstance(node, Node):
assert qconfig_map is not None
local_qconfig = qconfig_map[node.name]
assert local_qconfig is not None, \
'qconfig of a node before a quantized output must exist'
if node.name not in observed_node_names_set:
insert_observer(
node, local_qconfig.activation(),
model,
activation_post_process_map,
activation_post_process_indexes,
env, observed_graph, load_arg, observed_node_names_set, quants)
elif isinstance(node, list) or isinstance(node, tuple):
for n in node:
insert_observer_for_output_of_model(
n,
model,
qconfig_map,
activation_post_process_map,
activation_post_process_indexes,
env, observed_graph, load_arg, observed_node_names_set, quants)
elif isinstance(node, dict):
for n in node.values():
insert_observer_for_output_of_model(
n,
model,
qconfig_map,
activation_post_process_map,
activation_post_process_indexes,
env, observed_graph, load_arg, observed_node_names_set, quants)
else:
raise Exception("hardcoding output to be quantized not supported: " + str(type(node)))
def insert_observers_for_model(
model: GraphModule,
modules: Dict[str, torch.nn.Module],
matches: Dict[str, MatchResult],
quants: Dict[str, List[Tuple[DefaultQuantizeHandler, Callable]]],
observed_node_names_set: Set[str],
qconfig_map: Dict[str, QConfigAny],
activation_post_process_map: Dict[str, List[str]],
activation_post_process_indexes: Dict[str, int],
observed_graph: Graph,
prepare_custom_config_dict: Dict[str, Any],
input_quantized_idxs: List[int],
output_quantized_idxs: List[int]) -> Optional[Node]:
env: Dict[Any, Any] = {}
def load_arg(a):
return map_arg(a, lambda node: env[node.name])
graph_inputs = [node.name for node in model.graph.nodes if node.op == "placeholder"]
get_new_observer_name = get_new_attr_name_with_prefix(
'activation_post_process_')
placeholder_node_seen_cnt = 0
output_node_seen_cnt = 0
result_node: Optional[Node] = None
for node in model.graph.nodes:
if node.op == 'output':
# If this output is hardcoded to be quantized, insert an
# observer on the previous node if it does not already
# exist.
cur_output_node_idx = output_node_seen_cnt
output_node_seen_cnt += 1
if cur_output_node_idx in output_quantized_idxs:
prev_node = node.args[0]
insert_observer_for_output_of_model(
prev_node,
model,
qconfig_map,
activation_post_process_map,
activation_post_process_indexes,
env, observed_graph, load_arg, observed_node_names_set, quants)
observed_graph.output(load_arg(node.args[0]))
result_node = node
continue
if node.name in observed_node_names_set:
continue
root_node, matched_nodes, pattern, obj, qconfig = matches.get(
node.name, (None, None, None, None, None))
env[node.name] = observed_graph.node_copy(node, load_arg)
if root_node is node:
# index for input of custom module that needs to be observed in
# parent
if qconfig is not None:
assert obj is not None
standalone_module_input_idxs = \
maybe_insert_observer_for_special_module(
obj, modules, prepare_custom_config_dict, qconfig,
node)
insert_observer_for_output_of_the_node(
node, obj, qconfig, modules, model, pattern,
activation_post_process_map,
activation_post_process_indexes,
env,
observed_graph, load_arg, observed_node_names_set,
matched_nodes, standalone_module_input_idxs, quants)
if node.op == 'placeholder':
# skip adding observers at the graph input if the input is
# overridden to be quantized
cur_placeholder_node_idx = placeholder_node_seen_cnt
placeholder_node_seen_cnt += 1
if cur_placeholder_node_idx in input_quantized_idxs:
observed_node_names_set.add(node.name)
continue
insert_observer_for_input_arg_of_observed_node(
node, observed_node_names_set, quants,
model, activation_post_process_map,
activation_post_process_indexes,
env,
observed_graph, load_arg)
return result_node
def in_nodes(a: Argument, nodes: Set[Node]) -> bool:
""" Checks if argument `a` is in the nodes set
if it is a list, check if all elements of a is in the nodes set
recursively
"""
if isinstance(a, Node):
return a in nodes
elif isinstance(a, list) or isinstance(a, tuple):
return all([in_nodes(arg, nodes) for arg in a])
return False
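# handle_copy_nodes (below) rewrites the observed graph to drop observers that
# become redundant around "copy" ops (ops whose output simply follows the
# quantization state of their input). For example, in a chain like
#   x -> obs -> copy_node -> obs -> next_op
# the second observer can be removed once the copy node's input is observed;
# the other rules in the function similarly drop observers around binary ops
# with no tensor inputs and around getitem-style special nodes.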
def handle_copy_nodes(
observed_graph: Graph, matches: Dict[str, MatchResult],
quants: Dict[str, List[Tuple[DefaultQuantizeHandler, Callable]]],
qconfig_map: Dict[str, QConfigAny],
activation_post_process_map: Dict[str, List[str]],
modules: Dict[str, torch.nn.Module]):
observed_nodes: Set[Node] = set()
copy_nodes: Set[Node] = set()
non_tensor_input_binary_op_nodes: Set[Node] = set()
unmatched_nodes: Set[Node] = set()
actpp_to_remove: Set[Node] = set()
env: Dict[Any, Any] = {}
def load_arg(a: Argument) -> Argument:
return map_arg(a, lambda node: env[node.name])
result_graph = Graph()
cache_for_no_tensor_check: Dict[Node, bool] = dict()
for node in observed_graph.nodes:
root_node, matched_nodes, pattern, quantize_handler, qconfig = matches.get(
node.name, (None, None, None, None, None))
if node.op == "call_module" and is_activation_post_process(modules[node.target]):
# rule 1: if the input of a copy node is observed, we won't need to
# insert observer for the output of copy node
if in_nodes(node.args[0], copy_nodes) and in_nodes(node.args[0], observed_nodes):
# we'll remove the activation_post_process if the previous node is
# an observed copy node
actpp_to_remove.add(node)
# rule 2: if the previous node is a binary op without tensor input, we can remove the observer
if in_nodes(node.args[0], non_tensor_input_binary_op_nodes):
actpp_to_remove.add(node)
observed_nodes.add(node)
if root_node is node and qconfig is not None:
if isinstance(quantize_handler, CopyNodeQuantizeHandler):
copy_nodes.add(node)
# if previous node is observed, the copy node will be observed as well
if in_nodes(node.args[0], observed_nodes):
prev_node = node.args[0]
if (
isinstance(prev_node, Node) and
prev_node.op == "call_module" and
is_activation_post_process(modules[prev_node.target]) # type: ignore[index]
):
prev_prev_node = prev_node.args[0]
# If previous node is unmatched, the input to copy node should not
# be observed. For example, in the pattern of
#
# user_node_unmatched -> obs -> copy_node_matched -> next_node
#
# we delete `obs`, because user_node_unmatched is not quantizeable,
# and the input to copy_node_matched does not need observation.
if in_nodes(prev_prev_node, unmatched_nodes):
actpp_to_remove.add(prev_node)
observed_nodes.remove(prev_node)
else:
observed_nodes.add(node)
else:
observed_nodes.add(node)
if all_node_args_have_no_tensors(node, modules, cache_for_no_tensor_check):
non_tensor_input_binary_op_nodes.add(node)
if root_node is None and node.op != 'placeholder':
unmatched_nodes.add(node)
# rule 3: for special node, we'll just remove observer for its input
special_nodes = [
("call_function", operator.getitem),
]
if (node.op, node.target) in special_nodes:
if in_nodes(node.args[0], observed_nodes):
prev_node = node.args[0].args[0]
if prev_node.name not in qconfig_map or qconfig_map[prev_node.name] is None:
actpp_to_remove.add(node.args[0])
# if the previous node is not quantized, remove node from copy nodes
if node in copy_nodes:
copy_nodes.remove(node)
for node in observed_graph.nodes:
if node.op == "output":
result_graph.output(map_arg(node.args[0], load_arg))
elif node in actpp_to_remove:
env[node.name] = env[node.args[0].name]
else:
env[node.name] = result_graph.node_copy(node, load_arg)
return result_graph
def handle_cat_nodes(
model: torch.nn.Module, observed_graph: Graph, matches: Dict[str, MatchResult],
quants: Dict[str, List[Tuple[DefaultQuantizeHandler, Callable]]],
activation_post_process_map: Dict[str, List[str]],
modules: Dict[str, torch.nn.Module]):
observed_nodes: Set[Node] = set()
# activation_post_process for cat nodes
actpp_for_cat_nodes: Dict[Node, torch.nn.Module] = dict()
    # we'll set the activation_post_process for all tensor inputs of cat and the output
    # of cat to be the same (the activation_post_process of the first Tensor
    # input in the list). For example, if we have:
    # x = torch.cat([x1, x2, x3], ...)
    # we'll set the activation_post_process for x1, x2, x3 and x to be the
    # activation_post_process of x1:
    # x1 -> obs1 -> cat -> obs1 -> ...
# /
# x2 -> obs1
#
# note that activation_post_process here needs to work for different
# Tensor dimensions, e.g. MinMaxObserver, HistogramObserver, per tensor FakeQuantize
for node in observed_graph.nodes:
root_node, matched_nodes, pattern, quantize_handler, qconfig = matches.get(
node.name, (None, None, None, None, None))
if node.op == "call_module" and is_activation_post_process(modules[node.target]):
observed_nodes.add(node)
if node.args[0] in actpp_for_cat_nodes:
parent_name, name = _parent_name(node.target)
actpp_for_cat = actpp_for_cat_nodes[node.args[0]]
setattr(modules[parent_name], name, actpp_for_cat)
if root_node is node and qconfig is not None:
if isinstance(quantize_handler, CatQuantizeHandler):
if in_nodes(node.args[0], observed_nodes):
# set the activation post process to be the same
# input 0 for cat node is a list
assert isinstance(node.args[0], list) or \
isinstance(node.args[0], tuple), \
"Expecting first input of cat to be a list or tuple"
first_act_post_process = modules[node.args[0][0].target]
for arg in node.args[0]:
assert arg.op == "call_module" and is_activation_post_process(modules[arg.target])
parent_name, name = _parent_name(arg.target)
setattr(modules[parent_name], name, first_act_post_process)
actpp_for_cat_nodes[node] = first_act_post_process
return observed_graph
# A dictionary for querying the weight index for a given op
WEIGHT_INDEX_DICT = {
torch.nn.functional.conv1d : [1],
torch.nn.functional.conv2d : [1],
torch.nn.functional.conv3d : [1],
torch.nn.functional.linear : [1],
}
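# For example, in a call like torch.nn.functional.conv2d(x, w, b), the weight
# `w` is positional argument index 1, which is what node_arg_is_weight below
# checks against.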
def node_arg_is_weight(node: Node, arg: Any) -> bool:
if isinstance(node, Node) and node.op == 'call_function' and \
node.target in WEIGHT_INDEX_DICT:
for i, node_arg in enumerate(node.args):
if arg is node_arg and i in \
WEIGHT_INDEX_DICT[node.target]: # type: ignore[index]
return True
return False
CONV_OPS_WITH_BIAS = {
torch.nn.functional.conv1d,
torch.nn.functional.conv2d,
torch.nn.functional.conv3d,
}
CONV_BIAS_ARG_INDEX = 2
def node_arg_is_bias(node: Node, arg: Any) -> bool:
if isinstance(node, Node) and node.op == 'call_function':
if node.target in CONV_OPS_WITH_BIAS:
for i, node_arg in enumerate(node.args):
if arg is node_arg and i == CONV_BIAS_ARG_INDEX:
return True
elif node.target is torch.nn.functional.linear:
for kwarg_name, kwarg_value in node.kwargs.items():
if kwarg_name == 'bias' and arg is kwarg_value:
return True
return False
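# For example, torch.nn.functional.conv2d(x, w, b) passes the bias positionally
# at index 2, while torch.nn.functional.linear usually receives it as the
# `bias` keyword argument, which is why the two cases above are checked
# differently.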
# weight prepacking ops
WEIGHT_PREPACK_OPS = {
torch._ops.ops.quantized.linear_prepack,
torch._ops.ops.quantized.linear_prepack_fp16,
torch._ops.ops.quantized.conv1d_prepack,
torch._ops.ops.quantized.conv2d_prepack,
torch._ops.ops.quantized.conv3d_prepack,
}
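# These prepack ops appear in the converted graph; _fold_weight below replaces
# the subgraph that computes each packed weight with a single get_attr on a
# precomputed attribute, so weight packing does not run on every forward call.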
class Quantizer:
def __init__(self):
# mapping from matched node to full qualified path of activation_post_process
# must be filled before convert
self.activation_post_process_map: Dict[str, List[str]] = {}
# mapping from matched node to the index of activation_post_process that we are
# using currently
self.activation_post_process_indexes: Dict[str, int] = {}
# mapping from node name to qconfig that should be used for that node
# filled out for a model during _generate_qconfig_map
self.qconfig_map: Dict[str, QConfigAny] = {}
# mapping from fully qualified module name to module instance
# for example,
# {
# '': Model(...),
# 'linear': Linear(...),
# 'linear.weight_fake_quant': PerChannelMinMaxObserver(...),
# }
self.modules: Dict[str, torch.nn.Module] = {}
# mapping from a tuple of nodes in reverse order to uninitialized
# QuantizeHandler subclass. For example,
# {
# # match a single node
# (<class 'torch.nn.modules.conv.Conv3d'>:
# <class 'torch.quantization.fx.quantize.ConvRelu'>),
# # match multiple nodes in reverse order
# ((<function relu at 0x7f766a7360d0>, <built-in function add>):
# <class 'torch.quantization.fx.quantize.Add'>),
# }
self.patterns: Dict[Pattern, QuantizeHandler] = {}
self.prepare_custom_config_dict: Dict[str, Any] = {}
# mapping from node name to the scope of the module which contains the node.
self.node_name_to_scope: Dict[str, Tuple[str, type]] = {}
def _qat_swap_modules(
self, root: torch.nn.Module,
additional_qat_module_mapping: Dict[Callable, Callable]) -> None:
all_mappings = get_combined_dict(
get_default_qat_module_mappings(), additional_qat_module_mapping)
convert(root, mapping=all_mappings, inplace=True, remove_qconfig=False)
def _generate_qconfig_map(
self,
root: torch.nn.Module,
input_graph: Graph,
qconfig_dict: Any,
node_name_to_scope: Dict[str, Tuple[str, type]]) -> None:
global_qconfig = qconfig_dict.get("", None)
self.node_name_to_scope = node_name_to_scope
self.qconfig_map = dict()
for node in input_graph.nodes:
if node.op == "get_attr":
module_name, _ = _parent_name(node.target)
self.qconfig_map[node.name] = get_qconfig(
qconfig_dict, type(self.modules[module_name]), module_name, global_qconfig)
elif node.op == "call_function":
# precedence: [TODO] module_name_qconfig (need scope support
# from fx)
# > function_qconfig > global_qconfig
# module_name takes precedence over function qconfig
function_qconfig = get_object_type_qconfig(
qconfig_dict, node.target, global_qconfig)
module_path, module_type = node_name_to_scope[node.name]
qconfig = get_qconfig(
qconfig_dict, module_type, module_path, function_qconfig)
self.qconfig_map[node.name] = qconfig
elif node.op == "call_method":
module_path, module_type = node_name_to_scope[node.name]
# use the qconfig of the module that the node belongs to
qconfig = get_qconfig(
qconfig_dict, module_type, module_path, global_qconfig)
self.qconfig_map[node.name] = qconfig
elif node.op == 'call_module':
module_qconfig = get_qconfig(
qconfig_dict, type(self.modules[node.target]), node.target, global_qconfig)
                # regex is not supported in eager mode propagate_qconfig_, so we'll
                # need to set the qconfig explicitly here in case regex
                # is used
self.modules[node.target].qconfig = module_qconfig
self.qconfig_map[node.name] = module_qconfig
def _match(self,
model, graph, standalone_module_names, standalone_module_classes,
custom_module_classes) -> Tuple[
Dict[str, MatchResult],
Dict[str, List[Tuple[DefaultQuantizeHandler, Callable]]]]:
matches = self._find_matches(
graph, self.modules, self.patterns, standalone_module_names,
standalone_module_classes, custom_module_classes)
        # find _inputs_ to matched nodes that are not quantized; these
        # have to be quantized, which requires measuring stats,
        # so we initialize a DefaultQuantizeHandler object for each
quants = self._find_quants(graph, self.modules, matches)
return matches, quants
def _prepare(
self,
model: GraphModule,
qconfig_dict: Any,
node_name_to_scope: Dict[str, Tuple[str, type]],
prepare_custom_config_dict: Optional[Dict[str, Any]],
is_standalone_module: bool) -> ObservedGraphModule:
""" standalone_module means it a submodule that is not inlined in
parent module, and will be quantized separately as one unit.
How the standalone module is observed is specified by `input_quantized_idxs` and
`output_quantized_idxs` in the prepare_custom_config for the standalone module
Returns:
model(GraphModule): prepared standalone module
attributes:
_standalone_module_input_quantized_idxs(List[Int]): a list of
indexes for the graph input that is expected to be quantized,
same as input_quantized_idxs configuration provided
for the standalone module
_standalone_module_output_quantized_idxs(List[Int]): a list of
indexs for the graph output that is quantized
same as input_quantized_idxs configuration provided
for the standalone module
"""
if prepare_custom_config_dict is None:
prepare_custom_config_dict = {}
self.prepare_custom_config_dict = prepare_custom_config_dict
additional_quant_patterns = \
prepare_custom_config_dict.get("additional_quant_pattern", {})
self.patterns = get_combined_dict(
get_default_quant_patterns(), additional_quant_patterns)
convert_dict_to_ordered_dict(qconfig_dict)
flattened_qconfig_dict = get_flattened_qconfig_dict(qconfig_dict)
# TODO: support regex as well
propagate_qconfig_(model, flattened_qconfig_dict)
if model.training:
additional_qat_module_mapping = prepare_custom_config_dict.get(
"additional_qat_module_mapping", {})
self._qat_swap_modules(model, additional_qat_module_mapping)
self.modules = dict(model.named_modules())
# fill self.qconfig_map, a map from node name to qconfig, used in _find_matches
self._generate_qconfig_map(model, model.graph, qconfig_dict, node_name_to_scope)
# match the patterns that will get quantized
standalone_module_name_configs = prepare_custom_config_dict.get(
"standalone_module_name", [])
standalone_module_class_configs = prepare_custom_config_dict.get(
"standalone_module_class", [])
standalone_module_names = [config[0] for config in standalone_module_name_configs]
standalone_module_classes = [config[0] for config in standalone_module_class_configs]
custom_module_classes = get_custom_module_class_keys(
prepare_custom_config_dict, "float_to_observed_custom_module_class")
matches, quants = self._match(
model, model.graph, standalone_module_names, standalone_module_classes,
custom_module_classes)
self.activation_post_process_map = defaultdict(list)
observed_graph = Graph()
observed_node_names_set: Set[str] = set()
input_quantized_idxs: List[int] = self.prepare_custom_config_dict.get(
"input_quantized_idxs", [])
output_quantized_idxs: List[int] = self.prepare_custom_config_dict.get(
"output_quantized_idxs", [])
result_node = insert_observers_for_model(
model, self.modules, matches, quants, observed_node_names_set,
self.qconfig_map, self.activation_post_process_map, self.activation_post_process_indexes,
observed_graph, prepare_custom_config_dict, input_quantized_idxs, output_quantized_idxs)
self.modules = dict(model.named_modules())
matches, quants = self._match(
model, observed_graph, standalone_module_names, standalone_module_classes,
custom_module_classes)
observed_graph = handle_copy_nodes(
observed_graph, matches, quants, self.qconfig_map,
self.activation_post_process_map, self.modules)
self.modules = dict(model.named_modules())
matches, quants = self._match(
model, observed_graph, standalone_module_names, standalone_module_classes,
custom_module_classes)
observed_graph = handle_cat_nodes(
model, observed_graph, matches, quants, self.activation_post_process_map,
self.modules)
self.save_state(model)
model = ObservedGraphModule(model, observed_graph)
if is_standalone_module:
assert result_node is not None
assert isinstance(result_node.args[0], Node), \
"standalone module only supports returning simple value currently"\
"(not tuple, dict etc.)"
            # indicator for whether output is observed or not.
            # This is used to correctly quantize standalone modules
output_is_observed = \
result_node.args[0].name in observed_node_names_set
# these inputs are observed in parent
# converting List[int] to Tensor since module attribute is
# Union[Tensor, Module]
model._standalone_module_input_quantized_idxs = \
torch.tensor(input_quantized_idxs)
model._standalone_module_output_quantized_idxs = torch.tensor(output_quantized_idxs)
return model
def save_state(self, observed: GraphModule) -> None:
observed._activation_post_process_map = \
self.activation_post_process_map # type: ignore[assignment]
observed._activation_post_process_indexes = \
self.activation_post_process_indexes # type: ignore[assignment]
observed._patterns = self.patterns # type: ignore[assignment]
observed._qconfig_map = self.qconfig_map # type: ignore[assignment]
observed._prepare_custom_config_dict = \
self.prepare_custom_config_dict # type: ignore[assignment]
observed._node_name_to_scope = self.node_name_to_scope # type: ignore[assignment]
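    # restore_state (below) is the inverse of save_state: convert() runs on a
    # freshly loaded ObservedGraphModule, so the prepare-time state is read back
    # from the attributes written above.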
def restore_state(self, observed: GraphModule) -> None:
assert is_observed_module(observed), \
'incoming model must be produced by prepare_fx'
self.activation_post_process_map = \
observed._activation_post_process_map # type: ignore[assignment]
self.activation_post_process_indexes = \
observed._activation_post_process_indexes # type: ignore[assignment]
self.patterns = observed._patterns # type: ignore[assignment]
self.qconfig_map = observed._qconfig_map # type: ignore[assignment]
self.prepare_custom_config_dict = \
observed._prepare_custom_config_dict # type: ignore[assignment]
self.node_name_to_scope = observed._node_name_to_scope # type: ignore[assignment]
def prepare(
self,
model: GraphModule,
qconfig_dict: Any,
node_name_to_scope: Dict[str, Tuple[str, type]],
prepare_custom_config_dict: Dict[str, Any] = None,
is_standalone_module: bool = False) -> ObservedGraphModule:
return self._prepare(
model, qconfig_dict, node_name_to_scope, prepare_custom_config_dict,
is_standalone_module)
def _run_weight_observers(self, observed: GraphModule) -> None:
r''' Extract the subgraph that produces the weight for dynamic quant
or weight only quant node and run the subgraph to observe the weight.
Note that the observers of dynamic quant or weight only quant ops are
run during the convert step.
'''
for node in observed.graph.nodes:
if node.op == 'call_function' and node.target in WEIGHT_INDEX_DICT:
for i, node_arg in enumerate(node.args):
if i in WEIGHT_INDEX_DICT[node.target]:
# node_arg is weight
weight_observer_nodes = collect_producer_nodes(node_arg)
if weight_observer_nodes is not None:
weight_observer_module = \
graph_module_from_producer_nodes(
observed, weight_observer_nodes)
# run the weight observer
weight_observer_module()
return
def _convert(self, model: GraphModule, is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None,
is_standalone_module: bool = False,
_remove_qconfig_flag: bool = True) -> QuantizedGraphModule:
""" standalone_module means it a submodule that is not inlined in
parent module, and will be quantized separately as one unit.
Returns a quantized standalone module, whether input/output is quantized is
specified by prepare_custom_config_dict, with
input_quantized_idxs, output_quantized_idxs, please
see docs for prepare_fx for details
"""
if convert_custom_config_dict is None:
convert_custom_config_dict = {}
self.restore_state(model)
# always run weight observers in the top level forward method
# for dynamic quant ops or weight only quant ops
self._run_weight_observers(model)
# move to cpu since we only have quantized cpu kernels
model.eval().cpu()
self.modules = dict(model.named_modules(remove_duplicate=False))
custom_module_classes = get_custom_module_class_keys(
convert_custom_config_dict,
"observed_to_quantized_custom_module_class")
matches = self._find_matches(
model.graph, self.modules, self.patterns,
custom_module_classes=custom_module_classes)
quants: Dict[str, List[Tuple[DefaultQuantizeHandler, Callable]]] = \
self._find_quants(model.graph, self.modules, matches)
self.quantized_graph = Graph()
env: Dict[str, Node] = {}
# TODO: merge quant_env with env
quant_env: Dict[str, Tuple[Node, torch.dtype]] = {}
graph_inputs: List[str] = []
for node in model.graph.nodes:
if node.op == 'placeholder':
graph_inputs.append(node.name)
def load_non_quantized(n: Node) -> Node:
if n.name not in env:
assert n.name in quant_env, \
'trying to load float node but did not find ' + \
'node:' + n.name + \
' in quantized or non quantized environment, env: ' + \
str(env) + ' quant_env:' + str(quant_env)
quantized_node, _ = quant_env[n.name]
env[n.name] = Proxy(quantized_node).dequantize().node
return env[n.name]
def load_quantized(n: Node) -> Node:
assert n.name in quant_env, \
'trying to load quantized node but did not find node:' + \
n.name + ' in quant environment:' + str(quant_env)
return quant_env[n.name][0]
def load_x(n: Node) -> Node:
assert n.name in env or n.name in quant_env, \
'node ' + n.name + ' does not exist in either environment'
if n.name in quant_env:
return quant_env[n.name][0]
else:
return env[n.name]
def load_arg(quantized: Optional[Union[List[int], bool, Tuple[int, ...]]]
) -> Callable[[Node], Argument]:
"""
Input: quantized, which can be None, list, boolean or tuple
- if quantized is None, then we'll load the node as long as it
exists
- if quantized is a boolean, then all args will be
quantized/not quantized
- if quantized is an empty list or tuple, then it is the same as load_arg(quantized=False)
- if quantized is a list or tuple, then arg should be a list and
the args with corresponding indexes will be quantized
Output: fn which takes arg_or_args, and loads them from the
corresponding environment depending on the value of quantized.
"""
assert quantized is None or \
isinstance(quantized, (tuple, list, bool)), type(quantized)
if isinstance(quantized, (tuple, list)) and len(quantized) == 0:
# empty tuple or list means nothing is quantized
quantized = False
def load_arg_impl(arg_or_args):
# we'll update the format of `quantized`
# to better match arg_or_args
updated_quantized: Optional[Union[List[int], bool, Tuple[int, ...]]] = quantized
if isinstance(quantized, (tuple, list)) and \
len(quantized) == 1 and isinstance(arg_or_args, Node):
# when argument is one Node instead of tuple, we just need to check
# 0 is in the quantized list
updated_quantized = 0 in quantized
if updated_quantized is None:
return map_arg(arg_or_args, load_x)
if isinstance(updated_quantized, bool):
return map_arg(
arg_or_args,
load_quantized if updated_quantized else load_non_quantized)
elif isinstance(updated_quantized, (tuple, list)):
assert isinstance(arg_or_args, (tuple, list)), arg_or_args
loaded_args = []
# for now, we only support quantizing positional arguments
for i, a in enumerate(arg_or_args):
if i in updated_quantized:
loaded_args.append(map_arg(a, load_quantized))
else:
loaded_args.append(map_arg(a, load_non_quantized))
return type(arg_or_args)(loaded_args)
return load_arg_impl
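        # For illustration: load_arg(quantized=[0])(node.args) loads args[0]
        # from the quantized environment and the remaining args from the float
        # environment, while load_arg(quantized=None) loads each arg from
        # whichever environment currently holds it.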
def node_arg_is_quantized(node_arg: Any) -> bool:
if isinstance(node_arg, Node):
assert node_arg.name in env or node_arg.name in quant_env, \
'Expecting node_arg to be in the environment'
                # there might be nodes appearing in both environments, but
                # quant_env will take precedence
if node_arg.name in quant_env:
return True
elif node_arg.name in env:
return False
else:
return False
elif isinstance(node_arg, list):
quantized = map(node_arg_is_quantized, node_arg)
if all(quantized):
return True
elif not any(quantized):
return False
else:
raise Exception(
"partially quantized inputs in list not handled yet")
else:
return False
def is_output_quantized(node: Node, obj: QuantizeHandler) -> bool:
""" Check if output node is quantized or not """
assert self.modules is not None
# by default the output for a quantizable node is expected to be quantized
quantized = True
            # Need to get correct quantized/non-quantized state for the output
# of FixedQParamsQuantizeHandler
# TODO: we may want to try to remove the special case here
# as well
if type(obj) in [
CopyNodeQuantizeHandler,
FixedQParamsOpQuantizeHandler
]:
assert node.op in [
'call_module',
'call_function',
'call_method'], \
'FixedQParamsQuantizeHandler of type ' + node.op + ' is not handled'
# TODO: need to extend this to consider all relevant args instead of just arg[0]
quantized = node_arg_is_quantized(node.args[0])
                # the output is unquantized if the node is not a CopyNode
                # and activation is fp16 (since we currently output fp32
                # for the fp16 converter)
if not activation_is_int8_quantized(qconfig) or \
not input_output_observed(obj):
quantized = False
if node_return_type_is_int(node):
quantized = False
return quantized
def insert_quantize_node(node: Node) -> None:
""" Given a activation_post_process module call node, insert a
quantize node"""
assert self.modules is not None
assert isinstance(node.target, str)
observer_module = self.modules[node.target]
prev_node = node.args[0]
if observer_module.dtype == torch.float32:
# copy the observer for fp32 dtype
env[node.name] = self.quantized_graph.node_copy(
node, load_non_quantized)
elif isinstance(prev_node, Node) and prev_node.name in quant_env:
# if previous node is already quantized, we'll just remove the
# activation_post_process
_, prev_dtype = quant_env[prev_node.name]
current_dtype = observer_module.dtype
if prev_dtype == current_dtype:
quant_env[node.name] = quant_env[prev_node.name]
else:
root_module = self.modules[""]
assert isinstance(prev_node, Node)
observer_dtype: torch.dtype = observer_module.dtype # type: ignore[assignment]
quant_env[node.name] = (
quantize_node(self, load_non_quantized(prev_node),
observer_module, node, is_input=True),
observer_dtype)
else:
# replace activation post process with quantization ops
root_module = self.modules[""]
assert isinstance(node.args[0], Node)
dtype: torch.dtype = observer_module.dtype # type: ignore[assignment]
quant_env[node.name] = (
quantize_node(self, load_non_quantized(node.args[0]),
observer_module, node, is_input=True),
dtype)
# additional state to override inputs to be quantized, if specified
# by the user
placeholder_node_seen_cnt = 0
output_node_seen_cnt = 0
input_quantized_idxs: List[int] = self.prepare_custom_config_dict.get(
"input_quantized_idxs", [])
output_quantized_idxs: List[int] = self.prepare_custom_config_dict.get(
"output_quantized_idxs", [])
for node in model.graph.nodes:
if node.op == "output":
cur_output_node_idx = output_node_seen_cnt
output_node_seen_cnt += 1
if cur_output_node_idx in output_quantized_idxs:
                    # Results are kept quantized if the user specified the
                    # output_quantized_idxs override.
graph_output = map_arg(node.args[0], load_x)
else:
graph_output = map_arg(node.args[0], load_non_quantized)
self.quantized_graph.output(graph_output)
continue
root_node, matched, matched_pattern, obj, qconfig = \
matches.get(node.name, (None, None, None, None, None))
if root_node is node:
is_observed_standalone_module_node = (
node.op == 'call_module' and
is_observed_standalone_module(
self.modules[node.target])
)
if qconfig is None and not is_observed_standalone_module_node:
result = self.quantized_graph.node_copy(
node, load_non_quantized)
quantized = False
else:
assert obj is not None
# We will get whether the output is quantized or not before
# convert for standalone module and after convert
# for non-standalone module, since _standalone_module_output_quantized_idxs
# is only available in observed standalone module
if is_observed_standalone_module_node:
out_quant_idxs = self.modules[node.target]._standalone_module_output_quantized_idxs.tolist() # type: ignore[operator] # noqa: B950
assert len(out_quant_idxs) <= 1, "Currently standalone only support one output"
quantized = 0 in out_quant_idxs
result = obj.convert(
self, node, load_arg, is_reference=is_reference,
convert_custom_config_dict=convert_custom_config_dict)
if not is_observed_standalone_module_node:
quantized = is_output_quantized(node, obj)
if quantized:
quant_env[node.name] = result, activation_dtype(qconfig)
else:
env[node.name] = result
continue
elif root_node is not None:
if qconfig is None:
# This branch is hit if all of these conditions are met:
# 1. we are in a fusion pattern of multiple nodes (i.e. add-relu)
# 2. the current node is not the "root_node" of the pattern
# 3. quantization for this pattern is disabled
#
# In this case, we need to make sure to populate the env with
# intermediate nodes manually, because the QuantizeHandler.convert
# function will not be called.
result = self.quantized_graph.node_copy(
node, load_non_quantized)
env[node.name] = result
continue
# handle activation post process calls
if node.op == 'call_module' and \
is_activation_post_process(self.modules[node.target]):
insert_quantize_node(node)
elif node.op == 'placeholder':
cur_placeholder_node_idx = placeholder_node_seen_cnt
placeholder_node_seen_cnt += 1
if cur_placeholder_node_idx in input_quantized_idxs:
quant_env[node.name] = \
self.quantized_graph.node_copy(node, load_non_quantized), activation_dtype(qconfig) if qconfig else None
else:
env[node.name] = \
self.quantized_graph.node_copy(node, load_non_quantized)
else:
# copy quantized or non-quantized node
env[node.name] = \
self.quantized_graph.node_copy(node, load_non_quantized)
# remove activation post process
act_post_process_removed_graph = Graph()
env = {}
def load_arg_simple(a: Argument) -> Argument:
return map_arg(a, lambda node: env[node.name])
for node in self.quantized_graph.nodes:
if node.op == 'output':
act_post_process_removed_graph.output(
map_arg(node.args[0], load_arg_simple))
continue
if node.op == 'call_module' and \
is_activation_post_process(self.modules[node.target]):
# remove activation post process node
env[node.name] = env[node.args[0].name]
else:
env[node.name] = act_post_process_removed_graph.node_copy(
node, load_arg_simple)
# removes qconfig and activation_post_process modules
if _remove_qconfig_flag:
_remove_qconfig(model)
model = QuantizedGraphModule(model, act_post_process_removed_graph)
return model
    # Trace back from the weight node until we hit getattr, reconstruct the
    # graph module with the traced nodes and run the graph module to pack the
    # weight. Then replace the original chain of ops with the packed weight.
def _fold_weight(self, quantized: QuantizedGraphModule) -> QuantizedGraphModule:
packed_weights = dict()
# map from folded node name to the prepacked weight name
folded_nodes = dict()
# get packed weights
for node in quantized.graph.nodes:
if node.op == 'call_function' and node.target in WEIGHT_PREPACK_OPS:
nodes_to_fold = collect_producer_nodes(node)
if nodes_to_fold is not None:
for node_to_fold in nodes_to_fold:
folded_nodes[node_to_fold.name] = node
prepacking_module = graph_module_from_producer_nodes(
quantized, nodes_to_fold)
packed_weight = prepacking_module()
packed_weights[node.name] = packed_weight
# remove folded nodes and replace the prepacking node with getattr
folded_graph = Graph()
env: Dict[Any, Any] = {}
def load_arg(a):
return map_arg(a, lambda node: env[node.name])
quantized_root = quantized
quantized_graph = quantized.graph
for node in quantized_graph.nodes:
prepack_node = folded_nodes.get(node.name, None)
if prepack_node is node:
packed_weight = packed_weights[node.name]
# add a prepacked attribute to root
op_node = list(prepack_node.users)[0]
module_path, _ = self.node_name_to_scope[op_node.name]
get_new_packed_weight_name = \
get_new_attr_name_with_prefix(module_path + '_packed_weight_')
packed_weight_name = get_new_packed_weight_name(quantized_root)
setattr(quantized_root, packed_weight_name, packed_weight)
# replace prepack node with a getattr node
env[node.name] = folded_graph.create_node(
'get_attr', packed_weight_name, (), {})
elif prepack_node is not None:
                # remove the folded node
continue
else:
# copy other nodes
env[node.name] = folded_graph.node_copy(node, load_arg)
quantized = QuantizedGraphModule(quantized_root, folded_graph)
return quantized
def _fold_quant_dequant(self, quantized: QuantizedGraphModule) -> QuantizedGraphModule:
""" If quantize op is followed by a dequantize, we fold the ops together and remove the dequant.
In the case where the only consumer of quantize_per_tensor is a dequant op, we erase both
nodes from the graph, along with the qparams associated with quantize op.
"""
for node in quantized.graph.nodes:
if node.op == 'call_function' and node.target == torch.quantize_per_tensor:
quant_uses = list(node.users)
quant_args = node.args
float_tensor = quant_args[0]
for user in quant_uses:
is_dequant = user.op == 'call_method' and user.target == "dequantize"
if is_dequant:
user.replace_all_uses_with(float_tensor)
quantized.graph.erase_node(user)
                        # If dequant is the only user of the quant node, we erase the
                        # quant node and all of its inputs.
if len(quant_uses) == 1:
quantized.graph.erase_node(node)
for arg in quant_args[1:]:
if isinstance(arg, Node):
quantized.graph.erase_node(arg)
return quantized
def convert(self, model: GraphModule, is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None,
is_standalone_module: bool = False,
_remove_qconfig: bool = True) -> QuantizedGraphModule:
quantized = self._convert(
model, is_reference, convert_custom_config_dict, is_standalone_module, _remove_qconfig_flag=_remove_qconfig)
if not is_reference:
quantized = self._fold_weight(quantized)
quantized = self._fold_quant_dequant(quantized)
return quantized
def _find_matches(
self, graph: Graph, modules: Dict[str, torch.nn.Module],
patterns: Dict[Pattern, QuantizeHandler],
standalone_module_names: List[str] = None,
standalone_module_classes: List[Callable] = None,
custom_module_classes: List[Any] = None) -> Dict[str, MatchResult]:
"""
Matches the nodes in the input graph to quantization patterns, and
outputs the information needed to quantize them in future steps.
Inputs:
- graph: an fx.Graph object
- modules: a mapping of fully qualified module name to instance,
for example, {'foo': ModuleFoo, ...}
- patterns: a mapping from a tuple of nodes in reverse order to
uninitialized QuantizeHandler subclass.
Outputs a map of
node_name ->
(node, matched_values, matched_pattern, QuantizeHandler instance,
qconfig)
For example, {
'relu_1': (relu_1, [relu_1], torch.nn.functional.relu,
<CopyNodeQuantizeHandler instance>, QConfig(...)),
...
}
"""
if custom_module_classes is None:
custom_module_classes = []
if standalone_module_classes is None:
standalone_module_classes = []
if standalone_module_names is None:
standalone_module_names = []
match_map: Dict[str, MatchResult] = {}
all_matched : Set[str] = set()
def record_match(pattern, node, matched):
if isinstance(pattern, tuple):
s, *args = pattern
record_match(s, node, matched)
if pattern[0] is not getattr:
for subpattern, arg in zip(args, node.args):
record_match(subpattern, arg, matched)
else:
matched.append(node)
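        # For example, a fusion pattern such as (torch.nn.ReLU, operator.add),
        # written in reverse order, matched against a relu node records both the
        # relu node and its add argument, so the whole fused sequence maps to a
        # single QuantizeHandler instance in match_map.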
cache_for_no_tensor_check: Dict[Node, bool] = dict()
for node in reversed(graph.nodes):
if node.name not in match_map and node.name not in all_matched:
for pattern, value in patterns.items():
if is_match(modules, node, pattern):
skip_this_match = False
if value is BinaryOpQuantizeHandler:
use_copy_node = all_node_args_have_no_tensors(node, modules, cache_for_no_tensor_check)
if use_copy_node:
# TODO(future PR): update the pattern to quantize
# handler logic to take this into account.
value = CopyNodeQuantizeHandler # type: ignore[assignment]
# to properly check for dtype support, we need to
# navigate to the base node of an add-relu or mul-relu
# pattern
base_node = node
if (
(node.op == 'call_function' and
node.target is torch.nn.functional.relu) or
(node.op == 'call_module' and
isinstance(modules[node.target], torch.nn.ReLU))
):
base_node = node.args[0]
this_node_qconfig = \
self.qconfig_map[base_node.name]
if this_node_qconfig:
dtypes = get_qconfig_dtypes(this_node_qconfig)
# TODO(future PR): update the pattern to quantize
# handler logic to take this into account.
# This needs to handle 3 cases
# 1) op and dtype is in either [is_ref or non-ref] list -> don't skip
# 2) op is not in either list (i.e. relu) -> don't skip
# 3) op is in non-ref list, but not for dtype, and op+dtype not in is_ref list -> skip
# note: the value of is_reference is unknown at prepare, so we have to cover both cases
# handle is_reference = False
skip_match_not_is_reference = (
(base_node.target in binary_op_supported_dtypes) and
(dtypes not in binary_op_supported_dtypes[base_node.target])
)
# handle is_reference = True
supported_is_reference = (
(base_node.target in binary_reference_op_supported_dtypes) and
(dtypes in binary_reference_op_supported_dtypes[base_node.target])
)
# only skip if not reference says skip and is_reference doesn't support
skip_this_match = skip_match_not_is_reference and not supported_is_reference
if not skip_this_match:
matched: List[Any] = []
record_match(pattern, node, matched)
for n in matched:
match_map[n.name] = (
node, matched, pattern, value(self, node), # type: ignore[operator]
self.qconfig_map[n.name])
all_matched.add(n.name)
# break after finding the first match
break
# add custom module instances to the match result
assert self.modules is not None
for node in graph.nodes:
if node.op == 'call_module' and \
type(self.modules[node.target]) in custom_module_classes:
custom_module_qconfig = self.qconfig_map[node.name]
match_map[node.name] = (
node, [node], None, CustomModuleQuantizeHandler(self, node),
custom_module_qconfig)
def is_standalone_module(node_target):
assert self.modules is not None
return (
node_target in standalone_module_names or # type: ignore[operator]
type(self.modules[node_target]) in standalone_module_classes # type: ignore[operator]
)
# add standalone modules to the match
for node in graph.nodes:
if node.op == 'call_module' and \
(is_standalone_module(node.target) or
is_observed_standalone_module(self.modules[node.target])):
# add node to matched nodes
custom_module_qconfig = self.qconfig_map[node.name]
match_map[node.name] = (
node, [node], None,
StandaloneModuleQuantizeHandler(self, node),
custom_module_qconfig)
return match_map
def _find_quants(
self, graph: Graph, modules: Dict[str, torch.nn.Module],
matches: Dict[str, MatchResult]) -> Dict[str, List[Tuple[DefaultQuantizeHandler, Callable]]]:
"""
Takes the nodes in the input graph and pending matches, and finds and
returns the input and output nodes which need to be quantized.
Inputs:
- graph: an fx.Graph object
- modules: a dictionary from module path to module
- matches: output of self._find_matches function
Outputs a map of
node_name -> list of (QuantizeHandler instance (always DefaultQuantizeHandler),
activation_post_process (observer/fake_quantize module) constructor)
        The reason the value is a list is that each node can be configured with
        multiple qconfigs. For example, in a subgraph where a functional linear
        op is followed by a sigmoid op, linear may be configured with int8 static
        quantization and sigmoid with float16 static quantization; the output of
        linear (and input of sigmoid) then needs to be quantized first to int8
        and then to float16
"""
quants: Dict[str, List[Tuple[DefaultQuantizeHandler, Callable]]] = defaultdict(list)
cache_for_no_tensor_check: Dict[Node, bool] = dict()
def visit(node, matched_pattern, qconfig):
def visit_arg(arg):
is_weight = node_arg_is_weight(node, arg)
is_bias = node_arg_is_bias(node, arg)
is_activation = not (is_weight or is_bias)
no_tensors = all_node_args_have_no_tensors(arg, modules, cache_for_no_tensor_check)
# bias needs to be quantized if activation is fp16 and weight is fp16
# this is the case for glow
should_add_handler = qconfig is not None and (
(is_activation and
activation_is_statically_quantized(qconfig)) or
(is_weight and weight_is_quantized(qconfig)) or
(is_bias and activation_dtype(qconfig) == torch.float16)
and weight_dtype(qconfig) == torch.float16) and \
(not no_tensors)
if should_add_handler:
act_post_process_ctr = qconfig.weight if is_weight else \
qconfig.activation
# overwrite the constructor from qconfig if it is int8 quantized
if activation_is_int8_quantized(qconfig):
act_post_process_ctr = \
get_default_output_activation_post_process_map().get(
matched_pattern,
act_post_process_ctr)
if len(quants[arg.name]) > 0:
_, last_act_post_process_ctr = quants[arg.name][-1]
if act_post_process_ctr == last_act_post_process_ctr:
                            # we won't add act_post_process_ctr if it is the same
                            # as the last one we appended for this arg
return visit_arg
quants[arg.name].append((
DefaultQuantizeHandler(self, arg), act_post_process_ctr))
return visit_arg
for node in graph.nodes:
if node.name in matches:
root_node, matched_nodes, matched_pattern, quantize_handler, \
qconfig = matches[node.name]
if root_node is node and \
input_output_observed(quantize_handler):
# matched_nodes[-1] is the first op in the sequence and
# matched_nodes[0] is the last op in the sequence
# inputs
# matched_pattern is set to None for inputs because
# we only want to select QuantizeHandler object based
# on pattern for output, inputs will always use
# DefaultQuantizeHandler
map_arg(matched_nodes[-1].args, visit(matched_nodes[-1],
None, qconfig))
map_arg(matched_nodes[-1].kwargs, visit(matched_nodes[-1],
None, qconfig))
# output
# we don't insert observer for output of standalone module
if not isinstance(
quantize_handler, StandaloneModuleQuantizeHandler):
# passing in matched_pattern here so that we can
# customize activation_post_process constructor for
# output based on the pattern, e.g.
# for sigmoid op we'll use
# default_affine_fixed_qparam_fake_quant
map_arg(matched_nodes[0],
visit(None, matched_pattern, qconfig))
return quants
| 47.043451
| 155
| 0.603333
|
0d963e21716fc51ad0e70fb5fe9d0ca113431ba4
| 43,076
|
py
|
Python
|
src/sage/logic/boolformula.py
|
defeo/sage
|
d8822036a9843bd4d75845024072515ede56bcb9
|
[
"BSL-1.0"
] | null | null | null |
src/sage/logic/boolformula.py
|
defeo/sage
|
d8822036a9843bd4d75845024072515ede56bcb9
|
[
"BSL-1.0"
] | null | null | null |
src/sage/logic/boolformula.py
|
defeo/sage
|
d8822036a9843bd4d75845024072515ede56bcb9
|
[
"BSL-1.0"
] | null | null | null |
r"""
Boolean Formulas
Formulas consist of the operators ``&``, ``|``, ``~``, ``^``, ``->``, ``<->``,
corresponding to ``and``, ``or``, ``not``, ``xor``, ``if...then``, ``if and
only if``. Operators can be applied to variables that consist of a leading
letter and trailing underscores and alphanumerics. Parentheses may be used
to explicitly show order of operation.
EXAMPLES:
Create boolean formulas and combine them with
:meth:`~sage.logic.boolformula.BooleanFormula.ifthen()` method::
sage: import sage.logic.propcalc as propcalc
sage: f = propcalc.formula("a&((b|c)^a->c)<->b")
sage: g = propcalc.formula("boolean<->algebra")
sage: (f&~g).ifthen(f)
((a&((b|c)^a->c)<->b)&(~(boolean<->algebra)))->(a&((b|c)^a->c)<->b)
We can create a truth table from a formula::
sage: f.truthtable()
a b c value
False False False True
False False True True
False True False False
False True True False
True False False True
True False True False
True True False True
True True True True
sage: f.truthtable(end=3)
a b c value
False False False True
False False True True
False True False False
sage: f.truthtable(start=4)
a b c value
True False False True
True False True False
True True False True
True True True True
sage: propcalc.formula("a").truthtable()
a value
False False
True True
Now we can evaluate the formula for a given set of inputs::
sage: f.evaluate({'a':True, 'b':False, 'c':True})
False
sage: f.evaluate({'a':False, 'b':False, 'c':True})
True
And we can convert a boolean formula to conjunctive normal form::
sage: f.convert_cnf_table()
sage: f
(a|~b|c)&(a|~b|~c)&(~a|b|~c)
sage: f.convert_cnf_recur()
sage: f
(a|~b|c)&(a|~b|~c)&(~a|b|~c)
Or determine if an expression is satisfiable, a contradiction, or a tautology::
sage: f = propcalc.formula("a|b")
sage: f.is_satisfiable()
True
sage: f = f & ~f
sage: f.is_satisfiable()
False
sage: f.is_contradiction()
True
sage: f = f | ~f
sage: f.is_tautology()
True
The equality operator compares semantic equivalence::
sage: f = propcalc.formula("(a|b)&c")
sage: g = propcalc.formula("c&(b|a)")
sage: f == g
True
sage: g = propcalc.formula("a|b&c")
sage: f == g
False
It is an error to create a formula with bad syntax::
sage: propcalc.formula("")
Traceback (most recent call last):
...
SyntaxError: malformed statement
sage: propcalc.formula("a&b~(c|(d)")
Traceback (most recent call last):
...
SyntaxError: malformed statement
sage: propcalc.formula("a&&b")
Traceback (most recent call last):
...
SyntaxError: malformed statement
sage: propcalc.formula("a&b a")
Traceback (most recent call last):
...
SyntaxError: malformed statement
It is also an error to not abide by the naming conventions::
sage: propcalc.formula("~a&9b")
Traceback (most recent call last):
...
NameError: invalid variable name 9b: identifiers must begin with a letter and contain only alphanumerics and underscores
AUTHORS:
- Chris Gorecki (2006): initial version
- Paul Scurek (2013-08-03): added polish_notation, full_tree,
updated docstring formatting
- Paul Scurek (2013-08-08): added
:meth:`~sage.logic.boolformula.BooleanFormula.implies()`
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2006 William Stein <wstein.gmail.com>
# Copyright (C) 2006 Chris Gorecki <chris.k.gorecki@gmail.com>
# Copyright (C) 2013 Paul Scurek <scurek86@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from . import booleval
from . import logictable
from . import logicparser
# import boolopt
from types import TupleType, ListType
from sage.misc.flatten import flatten
latex_operators = [('&', '\\wedge '),
('|', '\\vee '),
('~', '\\neg '),
('^', '\\oplus '),
('<->', '\\leftrightarrow '),
('->', '\\rightarrow ')]
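# For example, substituting the pairs above turns the formula string "a&~b"
# into the LaTeX string "a\wedge \neg b"; the _latex_ method below performs
# exactly this replacement.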
class BooleanFormula(object):
"""
Boolean formulas.
INPUT:
- ``self`` -- calling object
- ``exp`` -- a string; this contains the boolean expression
to be manipulated
- ``tree`` -- a list; this contains the parse tree of the expression.
- ``vo`` -- a list; this contains the variables in the expression, in the
order that they appear; each variable only occurs once in the list
"""
__expression = ""
__tree = []
__vars_order = []
def __init__(self, exp, tree, vo):
r"""
Initialize the data fields.
EXAMPLES:
This example illustrates the creation of a statement::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b|~(c|a)")
sage: s
a&b|~(c|a)
"""
self.__expression = exp.replace(' ', '')
self.__tree = tree
self.__vars_order = vo
def __repr__(self):
r"""
Return a string representation of this statement.
OUTPUT:
A string representation of calling statement
EXAMPLES::
sage: import sage.logic.propcalc as propcalc
sage: propcalc.formula("man->monkey&human")
man->monkey&human
"""
return self.__expression
def _latex_(self):
r"""
Return a LaTeX representation of this statement.
OUTPUT:
A string containing the latex code for the statement
EXAMPLES::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("man->monkey&human")
sage: latex(s)
man\rightarrow monkey\wedge human
sage: f = propcalc.formula("a & ((~b | c) ^ a -> c) <-> ~b")
sage: latex(f)
a\wedge ((\neg b\vee c)\oplus a\rightarrow c)\leftrightarrow \neg b
"""
latex_expression = self.__expression
for old, new in latex_operators:
latex_expression = latex_expression.replace(old, new)
return latex_expression
def polish_notation(self):
r"""
Convert the calling boolean formula into polish notation.
OUTPUT:
A string representation of the formula in polish notation.
EXAMPLES:
This example illustrates converting a formula to polish notation::
sage: import sage.logic.propcalc as propcalc
sage: f = propcalc.formula("~~a|(c->b)")
sage: f.polish_notation()
'|~~a->cb'
sage: g = propcalc.formula("(a|~b)->c")
sage: g.polish_notation()
'->|a~bc'
AUTHORS:
- Paul Scurek (2013-08-03)
"""
return ''.join(flatten(logicparser.polish_parse(repr(self))))
def tree(self):
r"""
Return the parse tree of this boolean expression.
OUTPUT:
The parse tree as a nested list
EXAMPLES:
This example illustrates how to find the parse tree of a boolean
formula::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("man -> monkey & human")
sage: s.tree()
['->', 'man', ['&', 'monkey', 'human']]
::
sage: f = propcalc.formula("a & ((~b | c) ^ a -> c) <-> ~b")
sage: f.tree()
['<->',
['&', 'a', ['->', ['^', ['|', ['~', 'b', None], 'c'], 'a'], 'c']],
['~', 'b', None]]
.. NOTE::
This function is used by other functions in the logic module
that perform semantic operations on a boolean formula.
"""
return self.__tree
def full_tree(self):
r"""
Return a full syntax parse tree of the calling formula.
OUTPUT:
The full syntax parse tree as a nested list
EXAMPLES:
This example shows how to find the full syntax parse tree
of a formula::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a->(b&c)")
sage: s.full_tree()
['->', 'a', ['&', 'b', 'c']]
sage: t = propcalc.formula("a & ((~b | c) ^ a -> c) <-> ~b")
sage: t.full_tree()
['<->', ['&', 'a', ['->', ['^', ['|', ['~', 'b'], 'c'], 'a'], 'c']], ['~', 'b']]
sage: f = propcalc.formula("~~(a&~b)")
sage: f.full_tree()
['~', ['~', ['&', 'a', ['~', 'b']]]]
.. NOTE::
This function is used by other functions in the logic module
that perform syntactic operations on a boolean formula.
AUTHORS:
- Paul Scurek (2013-08-03)
"""
return logicparser.polish_parse(repr(self))
def __or__(self, other):
r"""
Overload the ``|`` operator to 'or' two statements together.
INPUT:
- ``other`` -- a boolean formula; this is the statement
on the right side of the operator
OUTPUT:
A boolean formula of the form ``self | other``.
EXAMPLES:
This example illustrates combining two formulas with ``|``::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b")
sage: f = propcalc.formula("c^d")
sage: s | f
(a&b)|(c^d)
"""
return self.add_statement(other, '|')
def __and__(self, other):
r"""
Overload the ``&`` operator to 'and' two statements together.
INPUT:
- ``other`` -- a boolean formula; this is the formula on
the right side of the operator
OUTPUT:
A boolean formula of the form ``self & other``.
EXAMPLES:
This example shows how to combine two formulas with ``&``::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b")
sage: f = propcalc.formula("c^d")
sage: s & f
(a&b)&(c^d)
"""
return self.add_statement(other, '&')
def __xor__(self, other):
r"""
Overload the ``^`` operator to 'xor' two statements together.
INPUT:
- ``other`` -- a boolean formula; this is the formula on
the right side of the operator
OUTPUT:
A boolean formula of the form ``self ^ other``.
EXAMPLES:
This example illustrates how to combine two formulas with ``^``::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b")
sage: f = propcalc.formula("c^d")
sage: s ^ f
(a&b)^(c^d)
"""
return self.add_statement(other, '^')
def __pow__(self, other):
r"""
Overload the ``^`` operator to 'xor' two statements together.
INPUT:
- ``other`` -- a boolean formula; this is the formula on
the right side of the operator
OUTPUT:
A boolean formula of the form ``self ^ other``.
EXAMPLES:
This example shows how to combine two formulas with ``^``::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b")
sage: f = propcalc.formula("c^d")
sage: s ^ f
(a&b)^(c^d)
.. TODO::
This function seems to be identical to ``__xor__``.
Thus, this function should be replaced with ``__xor__`` everywhere
that it appears in the logic module. Then it can be deleted
altogether.
"""
return self.add_statement(other, '^')
def __invert__(self):
r"""
Overload the ``~`` operator to 'not' a statement.
OUTPUT:
A boolean formula of the form ``~self``.
EXAMPLES:
This example shows how to negate a boolean formula::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b")
sage: ~s
~(a&b)
"""
exp = '~(' + self.__expression + ')'
parse_tree, vars_order = logicparser.parse(exp)
return BooleanFormula(exp, parse_tree, vars_order)
def ifthen(self, other):
r"""
Combine two formulas with the ``->`` operator.
INPUT:
- ``other`` -- a boolean formula; this is the formula
on the right side of the operator
OUTPUT:
A boolean formula of the form ``self -> other``.
EXAMPLES:
This example illustrates how to combine two formulas with '->'::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b")
sage: f = propcalc.formula("c^d")
sage: s.ifthen(f)
(a&b)->(c^d)
"""
return self.add_statement(other, '->')
def iff(self, other):
r"""
Combine two formulas with the ``<->`` operator.
INPUT:
- ``other`` -- a boolean formula; this is the formula
on the right side of the operator
OUTPUT:
A boolean formula of the form ``self <-> other``.
EXAMPLES:
This example illustrates how to combine two formulas with '<->'::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b")
sage: f = propcalc.formula("c^d")
sage: s.iff(f)
(a&b)<->(c^d)
"""
return self.add_statement(other, '<->')
def __eq__(self, other):
r"""
        Overload the ``==`` operator to determine logical equivalence.
INPUT:
- ``other`` -- a boolean formula; this is the formula
on the right side of the comparator
OUTPUT:
A boolean value to be determined as follows:
- ``True`` if ``self`` and ``other`` are logically equivalent
- ``False`` if ``self`` and ``other`` are not logically equivalent
EXAMPLES:
This example shows how to determine logical equivalence::
sage: import sage.logic.propcalc as propcalc
sage: f = propcalc.formula("(a|b)&c")
sage: g = propcalc.formula("c&(b|a)")
sage: f == g
True
::
sage: g = propcalc.formula("a|b&c")
sage: f == g
False
"""
return self.equivalent(other)
def truthtable(self, start=0, end=-1):
r"""
Return a truth table for the calling formula.
INPUT:
- ``start`` -- (default: 0) an integer; this is the first
row of the truth table to be created
        - ``end`` -- (default: -1) an integer; this is the last
row of the truth table to be created
OUTPUT:
The truth table as a 2-D array
EXAMPLES:
This example illustrates the creation of a truth table::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b|~(c|a)")
sage: s.truthtable()
a b c value
False False False True
False False True False
False True False True
False True True False
True False False False
True False True False
True True False True
True True True True
We can now create a truthtable of rows 1 to 4, inclusive::
sage: s.truthtable(1, 5)
a b c value
False False True False
False True False True
False True True False
True False False False
.. NOTE::
Each row of the table corresponds to a binary number, with
each variable associated to a column of the number, and taking on
a true value if that column has a value of 1. Please see the
            logictable module for details. The function returns a table whose
            rows run from ``start`` inclusive to ``end`` exclusive, so
            ``truthtable(0, 2)`` will include row 0 but not row 2.
When sent with no start or end parameters, this is an
exponential time function requiring `O(2^n)` time, where
`n` is the number of variables in the expression.
"""
max = 2 ** len(self.__vars_order)
if end < 0:
end = max
if end > max:
end = max
if start < 0:
start = 0
if start > max:
start = max
keys, table = [], []
vars = {}
for var in self.__vars_order:
vars[var] = False
keys.insert(0, var)
keys = list(keys)
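        # Treat each row index as a binary number: get_bit supplies the truth
        # value of each variable and the formula is evaluated on that assignment.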
for i in range(start, end):
j = 0
row = []
for key in keys:
bit = self.get_bit(i, j)
vars[key] = bit
j += 1
row.insert(0, bit)
row.append(booleval.eval_formula(self.__tree, vars))
table.append(row)
keys.reverse()
table = logictable.Truthtable(table, keys)
return table
def evaluate(self, var_values):
r"""
Evaluate a formula for the given input values.
INPUT:
- ``var_values`` -- a dictionary; this contains the
pairs of variables and their boolean values.
OUTPUT:
The result of the evaluation as a boolean.
EXAMPLES:
This example illustrates the evaluation of a boolean formula::
sage: import sage.logic.propcalc as propcalc
sage: f = propcalc.formula("a&b|c")
sage: f.evaluate({'a':False, 'b':False, 'c':True})
True
sage: f.evaluate({'a':True, 'b':False, 'c':False})
False
"""
return booleval.eval_formula(self.__tree, var_values)
def is_satisfiable(self):
r"""
Determine if the formula is ``True`` for some assignment of values.
OUTPUT:
A boolean value to be determined as follows:
- ``True`` if there is an assignment of values that makes the
formula ``True``.
- ``False`` if the formula cannot be made ``True`` by any assignment
of values.
EXAMPLES:
This example illustrates how to check a formula for satisfiability::
sage: import sage.logic.propcalc as propcalc
sage: f = propcalc.formula("a|b")
sage: f.is_satisfiable()
True
sage: g = f & (~f)
sage: g.is_satisfiable()
False
"""
table = self.truthtable().get_table_list()
for row in table[1:]:
if row[-1] is True:
return True
return False
def is_tautology(self):
r"""
Determine if the formula is always ``True``.
OUTPUT:
A boolean value to be determined as follows:
- ``True`` if the formula is a tautology.
- ``False`` if the formula is not a tautology.
EXAMPLES:
This example illustrates how to check if a formula is a tautology::
sage: import sage.logic.propcalc as propcalc
sage: f = propcalc.formula("a|~a")
sage: f.is_tautology()
True
sage: f = propcalc.formula("a&~a")
sage: f.is_tautology()
False
sage: f = propcalc.formula("a&b")
sage: f.is_tautology()
False
"""
return not (~self).is_satisfiable()
def is_contradiction(self):
r"""
Determine if the formula is always ``False``.
OUTPUT:
A boolean value to be determined as follows:
- ``True`` if the formula is a contradiction.
- ``False`` if the formula is not a contradiction.
EXAMPLES:
This example illustrates how to check if a formula is a contradiction.
::
sage: import sage.logic.propcalc as propcalc
sage: f = propcalc.formula("a&~a")
sage: f.is_contradiction()
True
sage: f = propcalc.formula("a|~a")
sage: f.is_contradiction()
False
sage: f = propcalc.formula("a|b")
sage: f.is_contradiction()
False
"""
return not self.is_satisfiable()
def implies(self, other):
r"""
        Determine if the calling formula implies the other formula.
INPUT:
- ``self`` -- calling object
- ``other`` -- instance of :class:`BooleanFormula`
OUTPUT:
A boolean value to be determined as follows:
- ``True`` - if ``self`` implies ``other``
        - ``False`` - if ``self`` does not imply ``other``
EXAMPLES:
This example illustrates determining if one formula implies another::
sage: import sage.logic.propcalc as propcalc
sage: f = propcalc.formula("a<->b")
sage: g = propcalc.formula("b->a")
sage: f.implies(g)
True
::
sage: h = propcalc.formula("a->(a|~b)")
sage: i = propcalc.formula("a")
sage: h.implies(i)
False
AUTHORS:
- Paul Scurek (2013-08-08)
"""
# input validation
if not isinstance(other, BooleanFormula):
raise TypeError("implies() takes an instance of the BooleanFormula() class as input")
conditional = self.ifthen(other)
return (conditional).is_tautology()
def equivalent(self, other):
r"""
Determine if two formulas are semantically equivalent.
INPUT:
- ``self`` -- calling object
        - ``other`` -- instance of :class:`BooleanFormula`
        OUTPUT:
        A boolean value to be determined as follows:
        - ``True`` if the two formulas are logically equivalent
        - ``False`` if the two formulas are not logically equivalent
EXAMPLES:
This example shows how to check for logical equivalence::
sage: import sage.logic.propcalc as propcalc
sage: f = propcalc.formula("(a|b)&c")
sage: g = propcalc.formula("c&(a|b)")
sage: f.equivalent(g)
True
sage: g = propcalc.formula("a|b&c")
sage: f.equivalent(g)
False
"""
return self.iff(other).is_tautology()
def convert_cnf_table(self):
r"""
Convert boolean formula to conjunctive normal form.
OUTPUT:
An instance of :class:`BooleanFormula` in conjunctive normal form.
EXAMPLES:
This example illustrates how to convert a formula to cnf::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a ^ b <-> c")
sage: s.convert_cnf()
sage: s
(a|b|~c)&(a|~b|c)&(~a|b|c)&(~a|~b|~c)
We now show that :meth:`convert_cnf` and :meth:`convert_cnf_table`
are aliases::
sage: t = propcalc.formula("a ^ b <-> c")
sage: t.convert_cnf_table(); t
(a|b|~c)&(a|~b|c)&(~a|b|c)&(~a|~b|~c)
sage: t == s
True
.. NOTE::
This method creates the cnf parse tree by examining the logic
table of the formula. Creating the table requires `O(2^n)` time
where `n` is the number of variables in the formula.
"""
str = ''
t = self.truthtable()
table = t.get_table_list()
vars = table[0]
for row in table[1:]:
if row[-1] is False:
str += '('
for i in range(len(row) - 1):
if row[i] is True:
str += '~'
str += vars[i]
str += '|'
str = str[:-1] + ')&'
self.__expression = str[:-1]
# in case of tautology
if len(self.__expression) == 0:
self.__expression = '(' + self.__vars_order[0] + '|~' + self.__vars_order[0] + ')'
self.__tree, self.__vars_order = logicparser.parse(self.__expression)
convert_cnf = convert_cnf_table
def convert_cnf_recur(self):
r"""
Convert boolean formula to conjunctive normal form.
OUTPUT:
An instance of :class:`BooleanFormula` in conjunctive normal form.
EXAMPLES:
        This example shows how to convert a formula to conjunctive normal form::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a^b<->c")
sage: s.convert_cnf_recur()
sage: s
(~a|a|c)&(~b|a|c)&(~a|b|c)&(~b|b|c)&(~c|a|b)&(~c|~a|~b)
.. NOTE::
This function works by applying a set of rules that are
            guaranteed to convert the formula. In the worst case the converted
            expression grows by `O(2^n)` in size (and time as well), but
            if the formula is already in (or close to) CNF it is only `O(n)`.
            This function can therefore require an exponential blow up in space
            from the original expression, which in turn can require large
            amounts of time. Unless a formula is already in (or close to) CNF,
            :meth:`convert_cnf()` is typically preferred, but results can vary.
"""
self.__tree = logicparser.apply_func(self.__tree, self.reduce_op)
self.__tree = logicparser.apply_func(self.__tree, self.dist_not)
self.__tree = logicparser.apply_func(self.__tree, self.dist_ors)
self.convert_expression()
def satformat(self):
r"""
Return the satformat representation of a boolean formula.
OUTPUT:
The satformat of the formula as a string.
EXAMPLES:
This example illustrates how to find the satformat of a formula::
sage: import sage.logic.propcalc as propcalc
sage: f = propcalc.formula("a&((b|c)^a->c)<->b")
sage: f.convert_cnf()
sage: f
(a|~b|c)&(a|~b|~c)&(~a|b|~c)
sage: f.satformat()
'p cnf 3 0\n1 -2 3 0 1 -2 -3 \n0 -1 2 -3'
.. NOTE::
See www.cs.ubc.ca/~hoos/SATLIB/Benchmarks/SAT/satformat.ps for a
description of satformat.
If the instance of boolean formula has not been converted to
CNF form by a call to :meth:`convert_cnf()` or
:meth:`convert_cnf_recur()`, then :meth:`satformat()` will call
:meth:`convert_cnf()`. Please see the notes for
:meth:`convert_cnf()` and :meth:`convert_cnf_recur()` for
performance issues.
"""
self.convert_cnf_table()
s = ''
vars_num = {}
i = 0
clauses = 0
for e in self.__vars_order:
vars_num[e] = str(i + 1)
i += 1
i = 0
w = 1
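        # Walk the CNF expression: '~' becomes a '-' sign, '&' ends a clause
        # (emitting the 0 separator), and each variable name is replaced by its
        # DIMACS number; output lines are wrapped roughly every 15 characters.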
while i < len(self.__expression):
c = self.__expression[i]
if c == ')':
clauses += 1
if c in '()|':
i += 1
continue
if c == '~':
s += '-'
elif c == '&':
s += '0 '
else:
varname = ''
                while i < len(self.__expression) and self.__expression[i] not in '|) ':
varname += self.__expression[i]
i += 1
s += vars_num[varname] + ' '
if len(s) >= (w * 15) and s[-1] != '-':
s += '\n'
w += 1
i += 1
s = 'p cnf ' + str(len(self.__vars_order)) + ' ' + str(clauses) + '\n' + s
return s[:-1]
# def simplify(self):
# r"""
# This function uses the propcalc package to simplify an expression to
# its minimal form.
#
# INPUT:
# self -- the calling object.
#
# OUTPUT:
# A simplified expression.
#
# EXAMPLES:
# sage: import sage.logic.propcalc as propcalc
# sage: f = propcalc.formula("a&((b|c)^a->c)<->b")
# sage: f.truthtable()
# a b c value
# False False False True
# False False True True
# False True False False
# False True True False
# True False False True
# True False True False
# True True False True
# True True True True
# sage: f.simplify()
# (~a&~b)|(a&~b&~c)|(a&b)
# sage: f.truthtable()
# a b c value
# False False False True
# False False True True
# False True False False
# False True True False
# True False False True
# True False True False
# True True False True
# True True True True
#
# .. NOTES::
#
# If the instance of boolean formula has not been converted to
# cnf form by a call to convert_cnf() or convert_cnf_recur()
# satformat() will call convert_cnf(). Please see the notes for
# convert_cnf() and convert_cnf_recur() for performance issues.
# """
# exp = ''
# self.__tree = logicparser.apply_func(self.__tree, self.reduce_op)
# plf = logicparser.apply_func(self.__tree, self.convert_opt)
# wff = boolopt.PLFtoWFF()(plf) # convert to positive-normal form
# wtd = boolopt.WFFtoDNF()
# dnf = wtd(wff)
# dnf = wtd.clean(dnf)
# if(dnf == [] or dnf == [[]]):
# exp = self.__vars_order[0] + '&~' + self.__vars_order[0] + ' '
# opt = boolopt.optimize(dnf)
# if(exp == '' and (opt == [] or opt == [[]])):
# exp = self.__vars_order[0] + '|~' + self.__vars_order[0] + ' '
# if(exp == ''):
# for con in opt:
# s = '('
# for prop in con:
# if(prop[0] == 'notprop'):
# s += '~'
# s += prop[1] + '&'
# exp += s[:-1] + ')|'
# self.__expression = exp[:-1]
# self.__tree, self.__vars_order = logicparser.parse(self.__expression)
# return BooleanFormula(self.__expression, self.__tree, self.__vars_order)
def convert_opt(self, tree):
r"""
Convert a parse tree to the tuple form used by :meth:`bool_opt()`.
INPUT:
- ``tree`` -- a list; this is a branch of a
parse tree and can only contain the '&', '|'
and '~' operators along with variables
OUTPUT:
A 3-tuple.
EXAMPLES:
This example illustrates the conversion of a formula into its
corresponding tuple::
sage: import sage.logic.propcalc as propcalc, sage.logic.logicparser as logicparser
sage: s = propcalc.formula("a&(b|~c)")
sage: tree = ['&', 'a', ['|', 'b', ['~', 'c', None]]]
sage: logicparser.apply_func(tree, s.convert_opt)
('and', ('prop', 'a'), ('or', ('prop', 'b'), ('not', ('prop', 'c'))))
.. NOTE::
This function only works on one branch of the parse tree. To
apply the function to every branch of a parse tree, pass the
function as an argument in
:func:`~sage.logic.logicparser.apply_func()` in
:mod:`~sage.logic.logicparser`.
"""
if not isinstance(tree[1], TupleType) and not (tree[1] is None):
lval = ('prop', tree[1])
else:
lval = tree[1]
if not isinstance(tree[2], TupleType) and not(tree[2] is None):
rval = ('prop', tree[2])
else:
rval = tree[2]
if tree[0] == '~':
return ('not', lval)
if tree[0] == '&':
op = 'and'
if tree[0] == '|':
op = 'or'
return (op, lval, rval)
def add_statement(self, other, op):
r"""
Combine two formulas with the given operator.
INPUT:
- ``other`` -- instance of :class:`BooleanFormula`; this
is the formula on the right of the operator
- ``op`` -- a string; this is the operator used to
combine the two formulas
OUTPUT:
The result as an instance of :class:`BooleanFormula`.
EXAMPLES:
This example shows how to create a new formula from two others::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b")
sage: f = propcalc.formula("c^d")
sage: s.add_statement(f, '|')
(a&b)|(c^d)
sage: s.add_statement(f, '->')
(a&b)->(c^d)
"""
exp = '(' + self.__expression + ')' + op + '(' + other.__expression + ')'
parse_tree, vars_order = logicparser.parse(exp)
return BooleanFormula(exp, parse_tree, vars_order)
def get_bit(self, x, c):
r"""
Determine if bit ``c`` of the number ``x`` is 1.
INPUT:
- ``x`` -- an integer; this is the number from
which to take the bit
        - ``c`` -- an integer; this is the bit number to
be taken, where 0 is the low order bit
OUTPUT:
A boolean to be determined as follows:
- ``True`` if bit ``c`` of ``x`` is 1.
- ``False`` if bit c of x is not 1.
EXAMPLES:
This example illustrates the use of :meth:`get_bit`::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b")
sage: s.get_bit(2, 1)
True
sage: s.get_bit(8, 0)
False
It is not an error to have a bit out of range::
sage: s.get_bit(64, 7)
False
Nor is it an error to use a negative number::
sage: s.get_bit(-1, 3)
False
sage: s.get_bit(64, -1)
True
sage: s.get_bit(64, -2)
False
.. NOTE::
The 0 bit is the low order bit. Errors should be handled
gracefully by a return of ``False``, and negative numbers ``x``
always return ``False`` while a negative ``c`` will index from the
high order bit.
"""
bits = []
while x > 0:
if x % 2 == 0:
b = False
else:
b = True
x = int(x / 2)
bits.append(b)
if c > len(bits) - 1:
return False
else:
return bits[c]
def reduce_op(self, tree):
r"""
Convert if-and-only-if, if-then, and xor operations to operations
only involving and/or operations.
INPUT:
- ``tree`` -- a list; this represents a branch
of a parse tree
OUTPUT:
A new list with no '^', '->', or '<->' as first element of list.
EXAMPLES:
This example illustrates the use of :meth:`reduce_op` with
:func:`apply_func`::
sage: import sage.logic.propcalc as propcalc, sage.logic.logicparser as logicparser
sage: s = propcalc.formula("a->b^c")
sage: tree = ['->', 'a', ['^', 'b', 'c']]
sage: logicparser.apply_func(tree, s.reduce_op)
['|', ['~', 'a', None], ['&', ['|', 'b', 'c'], ['~', ['&', 'b', 'c'], None]]]
.. NOTE::
This function only operates on a single branch of a parse tree.
To apply the function to an entire parse tree, pass the function
as an argument to :func:`~sage.logic.logicparser.apply_func()`
in :mod:`~sage.logic.logicparser`.
"""
if tree[0] == '<->':
# parse tree for (~tree[1]|tree[2])&(~tree[2]|tree[1])
new_tree = ['&', ['|', ['~', tree[1], None], tree[2]],
['|', ['~', tree[2], None], tree[1]]]
elif tree[0] == '^':
# parse tree for (tree[1]|tree[2])&~(tree[1]&tree[2])
new_tree = ['&', ['|', tree[1], tree[2]],
['~', ['&', tree[1], tree[2]], None]]
elif tree[0] == '->':
# parse tree for ~tree[1]|tree[2]
new_tree = ['|', ['~', tree[1], None], tree[2]]
else:
new_tree = tree
return new_tree
def dist_not(self, tree):
r"""
Distribute '~' operators over '&' and '|' operators.
INPUT:
- ``tree`` a list; this represents a branch
of a parse tree
OUTPUT:
A new list.
EXAMPLES:
This example illustrates the distribution of '~' over '&'::
sage: import sage.logic.propcalc as propcalc, sage.logic.logicparser as logicparser
sage: s = propcalc.formula("~(a&b)")
sage: tree = ['~', ['&', 'a', 'b'], None]
sage: logicparser.apply_func(tree, s.dist_not) #long time
['|', ['~', 'a', None], ['~', 'b', None]]
.. NOTE::
This function only operates on a single branch of a parse tree.
To apply the function to an entire parse tree, pass the function
as an argument to :func:`~sage.logic.logicparser.apply_func()`
in :mod:`~sage.logic.logicparser`.
"""
if tree[0] == '~' and isinstance(tree[1], ListType):
op = tree[1][0]
if op != '~':
if op == '&':
op = '|'
else:
op = '&'
new_tree = [op, ['~', tree[1][1], None], ['~', tree[1][2], None]]
return logicparser.apply_func(new_tree, self.dist_not)
else:
# cancel double negative
return tree[1][1]
else:
return tree
def dist_ors(self, tree):
r"""
Distribute '|' over '&'.
INPUT:
- ``tree`` -- a list; this represents a branch of
a parse tree
OUTPUT:
A new list.
EXAMPLES:
This example illustrates the distribution of '|' over '&'::
sage: import sage.logic.propcalc as propcalc, sage.logic.logicparser as logicparser
sage: s = propcalc.formula("(a&b)|(a&c)")
sage: tree = ['|', ['&', 'a', 'b'], ['&', 'a', 'c']]
sage: logicparser.apply_func(tree, s.dist_ors) #long time
['&', ['&', ['|', 'a', 'a'], ['|', 'b', 'a']], ['&', ['|', 'a', 'c'], ['|', 'b', 'c']]]
.. NOTE::
This function only operates on a single branch of a parse tree.
To apply the function to an entire parse tree, pass the function
as an argument to :func:`~sage.logic.logicparser.apply_func()`
in :mod:`~sage.logic.logicparser`.
"""
if tree[0] == '|' and isinstance(tree[2], ListType) and tree[2][0] == '&':
new_tree = ['&', ['|', tree[1], tree[2][1]],
['|', tree[1], tree[2][2]]]
return logicparser.apply_func(new_tree, self.dist_ors)
if tree[0] == '|' and isinstance(tree[1], ListType) and tree[1][0] == '&':
new_tree = ['&', ['|', tree[1][1], tree[2]],
['|', tree[1][2], tree[2]]]
return logicparser.apply_func(new_tree, self.dist_ors)
return tree
def to_infix(self, tree):
r"""
Convert a parse tree from prefix to infix form.
INPUT:
- ``tree`` -- a list; this represents a branch
of a parse tree
OUTPUT:
A new list.
EXAMPLES:
This example shows how to convert a parse tree from prefix to
infix form::
sage: import sage.logic.propcalc as propcalc, sage.logic.logicparser as logicparser
sage: s = propcalc.formula("(a&b)|(a&c)")
sage: tree = ['|', ['&', 'a', 'b'], ['&', 'a', 'c']]
sage: logicparser.apply_func(tree, s.to_infix)
[['a', '&', 'b'], '|', ['a', '&', 'c']]
.. NOTE::
This function only operates on a single branch of a parse tree.
To apply the function to an entire parse tree, pass the function
as an argument to :func:`~sage.logic.logicparser.apply_func()`
in :mod:`~sage.logic.logicparser`.
"""
if tree[0] != '~':
return [tree[1], tree[0], tree[2]]
return tree
def convert_expression(self):
r"""
Convert the string representation of a formula to conjunctive
normal form.
EXAMPLES::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a^b<->c")
sage: s.convert_expression(); s
a^b<->c
"""
ttree = self.__tree[:]
ttree = logicparser.apply_func(ttree, self.to_infix)
self.__expression = ''
str_tree = str(ttree)
open_flag = False
i = 0
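        # Walk the string form of the infix tree, copying variable names and
        # operators while inserting parentheses around '|' groups and skipping
        # list punctuation and 'None' placeholders.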
for c in str_tree:
if i < len(str_tree) - 1:
op = self.get_next_op(str_tree[i:])
if op == '|' and not open_flag:
self.__expression += '('
open_flag = True
if i < len(str_tree) - 2 and str_tree[i + 1] == '&' and open_flag:
open_flag = False
self.__expression += ')'
if str_tree[i:i + 4] == 'None':
i += 4
if i < len(str_tree) and str_tree[i] not in ' \',[]':
self.__expression += str_tree[i]
i += 1
if open_flag is True:
self.__expression += ')'
def get_next_op(self, str):
r"""
Return the next operator in a string.
INPUT:
- ``str`` -- a string; this contains a logical
expression
OUTPUT:
The next operator as a string.
EXAMPLES:
This example illustrates how to find the next operator in a formula::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("f&p")
sage: s.get_next_op("abra|cadabra")
'|'
.. NOTE::
The parameter ``str`` is not necessarily the string
representation of the calling object.
"""
i = 0
while i < len(str) - 1 and str[i] != '&' and str[i] != '|':
i += 1
return str[i]
avg_line_length: 30.123077 | max_line_length: 124 | alphanum_fraction: 0.517016

hexsha: 512378e4b4bdc4f9ff898a11238dd2c0f22457db | size: 5,956 | ext: py | lang: Python
max_stars_repo_path: example-multiagent/rllib/ppo_agent_one_load_carla.py | max_stars_repo_name: AveryWenwenSi/macad-gym | max_stars_repo_head_hexsha: c720c86bc5d13a437c7866955173916d40bb01b3 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: example-multiagent/rllib/ppo_agent_one_load_carla.py | max_issues_repo_name: AveryWenwenSi/macad-gym | max_issues_repo_head_hexsha: c720c86bc5d13a437c7866955173916d40bb01b3 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: example-multiagent/rllib/ppo_agent_one_load_carla.py | max_forks_repo_name: AveryWenwenSi/macad-gym | max_forks_repo_head_hexsha: c720c86bc5d13a437c7866955173916d40bb01b3 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from gym.spaces import Box, Discrete
import ray
from ray import tune
from ray.rllib.agents.ppo.ppo import PPOAgent
from ray.rllib.agents.ppo.ppo_policy_graph import PPOPolicyGraph
from ray.rllib.models import ModelCatalog
from ray.rllib.models.preprocessors import Preprocessor
from ray.tune import run_experiments
from ray.tune.registry import register_env
from env_wrappers import wrap_deepmind
from models import register_mnih15_shared_weights_net
from ray.tune.logger import pretty_print
import gym
import macad_gym
parser = argparse.ArgumentParser()
parser.add_argument("--num-iters", type=int, default=20)
parser.add_argument(
"--num-workers",
default=2,
type=int,
help="Num workers (CPU cores) to use")
parser.add_argument(
"--num-gpus", default=1, type=int, help="Number of gpus to use. Default=1")
parser.add_argument(
"--sample-bs-per-worker",
default=50,
type=int,
help="Number of samples in a batch per worker. Default=50")
parser.add_argument(
"--train-bs",
default=128,
type=int,
help="Train batch size. Use as per available GPU mem. Default=500")
parser.add_argument(
"--envs-per-worker",
default=1,
type=int,
help="Number of env instances per worker. Default=10")
parser.add_argument(
"--notes",
default=None,
help="Custom experiment description to be added to comet logs")
register_mnih15_shared_weights_net()
model_name = "mnih15_shared_weights"
env_name = "HeteNcomIndePOIntrxMATLS1B2C1PTWN3-v0"
env = gym.make(env_name)
env_actor_configs = env.configs
num_framestack = env_actor_configs["env"]["framestack"]
def env_creator(env_config):
import macad_gym
env = gym.make("HeteNcomIndePOIntrxMATLS1B2C1PTWN3-v0")
# Apply wrappers to: convert to Grayscale, resize to 84 x 84,
# stack frames & some more op
env = wrap_deepmind(env, dim=84, num_framestack=num_framestack)
return env
register_env(env_name, lambda config: env_creator(config))
# Placeholder to enable use of a custom pre-processor
class ImagePreproc(Preprocessor):
def _init_shape(self, obs_space, options):
shape = (84, 84, 3) # Adjust third dim if stacking frames
return shape
def transform(self, observation):
return observation
ModelCatalog.register_custom_preprocessor("sq_im_84", ImagePreproc)
if __name__ == "__main__":
args = parser.parse_args()
ray.init()
obs_space = Box(0.0, 255.0, shape=(84, 84, 3))
act_space = Discrete(9)
def gen_policy():
model_config = {
# Model and preprocessor options.
"model": {
"custom_model": model_name,
"custom_options": {
# Custom notes for the experiment
"notes": {
"notes": args.notes
},
},
# NOTE:Wrappers are applied by RLlib if custom_preproc is NOT
# specified
"custom_preprocessor": "sq_im_84",
"dim": 84,
"free_log_std": False, # if args.discrete_actions else True,
"grayscale": True,
# conv_filters to be used with the custom CNN model.
# "conv_filters": [[16, [4, 4], 2], [32, [3, 3], 2],
# [16, [3, 3], 2]]
},
# preproc_pref is ignored if custom_preproc is specified
# "preprocessor_pref": "deepmind",
# env_config to be passed to env_creator
"env_config": env_actor_configs
}
return (PPOPolicyGraph, obs_space, act_space, model_config)
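    # Build one PPO policy graph per actor id, so each agent in the
    # multi-agent CARLA env learns its own policy over the shared spaces.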
policy_graphs = {
a_id: gen_policy()
for a_id in env_actor_configs["actors"].keys()
}
# policy_ids = list(policies.keys())
    def policy_mapping_fn(agent_id):
        # Map each agent id to the policy graph registered under that same id.
        return agent_id
ppo_trainer = PPOAgent(
env=env_name,
config={
"multiagent": {
"policy_graphs": policy_graphs,
"policy_mapping_fn": policy_mapping_fn,
# "policies_to_train": ["car1"],
},
# disable filters, otherwise we would need to synchronize those
# as well to the DQN agent
# "observation_filter": "NoFilter",
})
# disable DQN exploration when used by the PPO trainer
# ppo_trainer.optimizer.foreach_evaluator(
# lambda ev: ev.for_policy(
# lambda pi: pi.set_epsilon(0.0), policy_id="dqn_policy"))
for i in range(args.num_iters):
print("== Iteration", i, "==")
# improve the PPO policy
print(pretty_print(ppo_trainer.train()))
print("Pre-training done.")
    # Save a checkpoint of the pre-trained policies to restore from.
    checkpoint = ppo_trainer.save()
    # Start our actual experiment: a second trainer that only trains "car1".
    new_trainer = PPOAgent(
        env=env_name,
        config={
            "multiagent": {
                "policy_graphs": policy_graphs,
                "policy_mapping_fn": policy_mapping_fn,
                "policies_to_train": ["car1"],
            },
        })
untrained_weights = ppo_trainer.get_weights()
new_trainer.restore(checkpoint)
new_trainer.set_weights(
{pid: w
for pid, w in untrained_weights.items() if pid != "car1"})
new_checkpoint = new_trainer.save()
new_trainer.stop()
print(".. checkpoint to restore from (all policies reset, "
f"except policy_0): {new_checkpoint}")
# print("Starting new tune.run")
for i in range(args.num_iters):
print("== Iteration", i, "==")
# improve the PPO policy
print("-- PPO --")
print(pretty_print(ppo_trainer.train()))
avg_line_length: 31.183246 | max_line_length: 79 | alphanum_fraction: 0.631464

hexsha: bcbf587d1419457aa67901d39e2a9defa8fcf37f | size: 1,890 | ext: py | lang: Python
max_stars_repo_path: LeetCode/10-Regular-Expression-Matching☆☆☆☆/Regular-Expression-Matching2.py | max_stars_repo_name: hscspring/TheAlgorithms-Python | max_stars_repo_head_hexsha: 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | max_stars_repo_licenses: ["MIT"] | max_stars_count: 10 | max_stars_repo_stars_event_min_datetime: 2020-07-06T11:00:58.000Z | max_stars_repo_stars_event_max_datetime: 2022-01-29T09:25:24.000Z
max_issues_repo_path: LeetCode/10-Regular-Expression-Matching☆☆☆☆/Regular-Expression-Matching2.py | max_issues_repo_name: hscspring/TheAlgorithms-Python | max_issues_repo_head_hexsha: 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: LeetCode/10-Regular-Expression-Matching☆☆☆☆/Regular-Expression-Matching2.py | max_forks_repo_name: hscspring/TheAlgorithms-Python | max_forks_repo_head_hexsha: 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | max_forks_repo_licenses: ["MIT"] | max_forks_count: 3 | max_forks_repo_forks_event_min_datetime: 2020-07-13T06:39:23.000Z | max_forks_repo_forks_event_max_datetime: 2020-08-15T16:29:48.000Z
class Solution:
def isMatch(self, s: str, p: str) -> bool:
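        # T[i][j] is True iff the first i characters of s match the first j
        # characters of p; row/column 0 represent the empty prefixes.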
T = [[False] * (len(p) + 1) for _ in range(len(s) + 1)]
T[0][0] = True
for j in range(len(p)):
if p[j] == "*":
T[0][j+1] = T[0][j-1]
for i in range(len(s)):
for j in range(len(p)):
if s[i] == p[j] or p[j] == ".":
T[i+1][j+1] = T[i][j]
elif p[j] == "*":
T[i+1][j+1] = T[i+1][j-1]
if not T[i+1][j+1]:
if p[j-1] == "." or p[j-1] == s[i]:
T[i+1][j+1] = T[i][j+1]
else:
continue
return T[-1][-1]
if __name__ == '__main__':
so = Solution()
assert so.isMatch("ab", ".*..c*") == True
assert so.isMatch("a", ".*..a*") == False
assert so.isMatch("ab", ".*..") == True
assert so.isMatch("aa", "a") == False
assert so.isMatch("aa", "a*") == True
assert so.isMatch("ab", ".*") == True
assert so.isMatch("aab", "c*a*b") == True
assert so.isMatch("mississippi", "mis*is*p*.") == False
assert so.isMatch("ab", "a.") == True
assert so.isMatch("aa", "a.") == True
assert so.isMatch("a", "a.") == False
assert so.isMatch("axb", "a.b*") == True
assert so.isMatch("ax", "a.b*") == True
assert so.isMatch("axc", "a.b*") == False
assert so.isMatch("ac", "a.*") == True
assert so.isMatch("abc", "a.*") == True
assert so.isMatch("ab", "ab.*") == True
assert so.isMatch("abcde", "ab.*") == True
assert so.isMatch("ac", "ab.*") == False
assert so.isMatch("ab", "a.*b") == True
assert so.isMatch("ab", "a.*b*") == True
assert so.isMatch("abb", "a.*b*") == True
assert so.isMatch("aaa", "a*a") == True
assert so.isMatch("aaa", "ab*ac*a") == True
assert so.isMatch("aaa", "ab*a*c*a") == True
avg_line_length: 39.375 | max_line_length: 63 | alphanum_fraction: 0.444974

hexsha: 78c5b16e7aa5f91f48fdeafef85bc3504ec1f730 | size: 1,498 | ext: py | lang: Python
max_stars_repo_path: pizza/tests/test_slice_growing.py | max_stars_repo_name: purrcat259/n-n-hashcode | max_stars_repo_head_hexsha: 98a1c443e6112903bc29a858bc18476a6635d460 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: pizza/tests/test_slice_growing.py | max_issues_repo_name: purrcat259/n-n-hashcode | max_issues_repo_head_hexsha: 98a1c443e6112903bc29a858bc18476a6635d460 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pizza/tests/test_slice_growing.py | max_forks_repo_name: purrcat259/n-n-hashcode | max_forks_repo_head_hexsha: 98a1c443e6112903bc29a858bc18476a6635d460 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from pizza.input import ExampleInput
from pizza.models.slice import Slice
"""
TTTTT
TMMMT
TTTTT
"""
example_input = ExampleInput()
example_input.read_file()
data = example_input.data
class TestSliceGrowing:
s = None
def setup_method(self):
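        # Every test starts from a fresh 1x1 slice at (0, 0), a tomato cell.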
self.s = Slice(data, (0, 0))
def test_grow_row(self):
assert 1 == self.s.size()
assert 1 == self.s.tomato
assert 0 == self.s.mushroom
self.s.growRow(data)
assert 2 == self.s.size()
assert 2 == self.s.tomato
assert 0 == self.s.mushroom
self.s.growRow(data)
assert 3 == self.s.size()
assert 3 == self.s.tomato
assert 0 == self.s.mushroom
def test_grow_column(self):
assert 1 == self.s.size()
assert 1 == self.s.tomato
assert 0 == self.s.mushroom
self.s.growCol(data)
assert 2 == self.s.size()
assert 2 == self.s.tomato
assert 0 == self.s.mushroom
self.s.growCol(data)
assert 3 == self.s.size()
assert 3 == self.s.tomato
assert 0 == self.s.mushroom
def test_grow_both(self):
assert 1 == self.s.size()
assert 1 == self.s.tomato
assert 0 == self.s.mushroom
self.s.growBoth(data)
assert 4 == self.s.size()
assert 3 == self.s.tomato
assert 1 == self.s.mushroom
self.s.growBoth(data)
assert 9 == self.s.size()
assert 7 == self.s.tomato
assert 2 == self.s.mushroom
avg_line_length: 24.557377 | max_line_length: 36 | alphanum_fraction: 0.570761