Dataset columns (type, observed value range):
hexsha: string (length 40)
size: int64 (4 to 1.02M)
ext: string (8 classes)
lang: string (1 value)
max_stars_repo_path: string (length 4 to 209)
max_stars_repo_name: string (length 5 to 121)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 4 to 209)
max_issues_repo_name: string (length 5 to 121)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 4 to 209)
max_forks_repo_name: string (length 5 to 121)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 4 to 1.02M)
avg_line_length: float64 (1.07 to 66.1k)
max_line_length: int64 (4 to 266k)
alphanum_fraction: float64 (0.01 to 1)
hexsha: 522ffb6fe0b4c3f9c1d46796a54983ebbc2182c3 | size: 438 | ext: py | lang: Python
max_stars:  path=thinkpython_allen_downey/exercise_6_5.py | name=alirkaya/programming-textbook-solutions | head_hexsha=7362dce474b8a881d654f95604e09d1d0e76aec2 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=thinkpython_allen_downey/exercise_6_5.py | name=alirkaya/programming-textbook-solutions | head_hexsha=7362dce474b8a881d654f95604e09d1d0e76aec2 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks:  path=thinkpython_allen_downey/exercise_6_5.py | name=alirkaya/programming-textbook-solutions | head_hexsha=7362dce474b8a881d654f95604e09d1d0e76aec2 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
def ack(m, n):
if m < 0 or n < 0:
return -1
elif m == 0:
return n+1
elif n == 0:
return ack(m-1, 1)
else:
return ack(m-1, ack(m, n-1))
print(ack(3, 4))
# For large values of m and n, the function raises a RecursionError because, by
# default, Python only allows a fixed number of nested recursive calls. If the
# function tries to go beyond that limit, Python raises the error and aborts the call.
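# A minimal sketch (not part of the original exercise) of inspecting and raising
# the recursion limit with the standard sys module; the new limit of 10000 below
# is purely illustrative.
import sys
print(sys.getrecursionlimit())  # commonly 1000 by default
sys.setrecursionlimit(10000)    # permits deeper ack() calls before the error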
avg_line_length: 27.375 | max_line_length: 81 | alphanum_fraction: 0.614155

hexsha: 69419c4414e47f7c862e377d3dfa69a12df5b5d0 | size: 10,480 | ext: py | lang: Python
max_stars:  path=openscad_libraries/NopSCADlib/scripts/bom.py | name=hongming/feixingchuan | head_hexsha=3e3420fe401dfd2385594bd0da11caf01eb753d3 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=openscad_libraries/NopSCADlib/scripts/bom.py | name=hongming/feixingchuan | head_hexsha=3e3420fe401dfd2385594bd0da11caf01eb753d3 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks:  path=openscad_libraries/NopSCADlib/scripts/bom.py | name=hongming/feixingchuan | head_hexsha=3e3420fe401dfd2385594bd0da11caf01eb753d3 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
#!/usr/bin/env python
#
# NopSCADlib Copyright Chris Palmer 2018
# nop.head@gmail.com
# hydraraptor.blogspot.com
#
# This file is part of NopSCADlib.
#
# NopSCADlib is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# NopSCADlib is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with NopSCADlib.
# If not, see <https://www.gnu.org/licenses/>.
#
#! Generates BOM files for the project.
from __future__ import print_function
import os
import sys
import shutil
import openscad
from time import *
from set_config import *
import json
import re
def find_scad_file(mname):
for filename in os.listdir(source_dir):
if filename[-5:] == ".scad":
#
# look for module which makes the assembly
#
with open(source_dir + "/" + filename, "r") as f:
for line in f.readlines():
words = line.split()
if len(words) and words[0] == "module":
module = words[1].split('(')[0]
if module == mname:
return filename
return None
class Part:
def __init__(self, args):
self.count = 1
for arg in args:
arg = arg.replace('true', 'True').replace('false', 'False').replace('undef', 'None')
exec('self.' + arg)
def data(self):
return self.__dict__
class BOM:
def __init__(self, name):
self.name = name
self.big = None
self.ngb = False
self.zoomed = 0
self.count = 1
self.vitamins = {}
self.printed = {}
self.routed = {}
self.assemblies = {}
def flat_data(self):
assemblies = {}
for ass in self.assemblies:
assemblies[ass] = self.assemblies[ass].count
return {
"name" : self.name,
"big" : self.big,
"ngb" : self.ngb,
"zoomed" : self.zoomed,
"count" : self.count,
"assemblies" : assemblies,
"vitamins" : {v : self.vitamins[v].data() for v in self.vitamins},
"printed" : {p : self.printed[p].data() for p in self.printed},
"routed" : {r : self.routed[r].data() for r in self.routed}
}
def add_part(self, s):
args = []
match = re.match(r'^(.*?\.stl|.*?\.dxf)\((.*)\)$', s) #look for name.stl(...) or name.dxf(...)
if match:
s = match.group(1)
args = [match.group(2)]
if s[-4:] == ".stl":
parts = self.printed
else:
if s[-4:] == ".dxf":
parts = self.routed
else:
parts = self.vitamins
if s in parts:
parts[s].count += 1
else:
parts[s] = Part(args)
def add_assembly(self, ass, args = []):
if ass in self.assemblies:
self.assemblies[ass].count += 1
else:
bom = BOM(ass)
for arg in args:
arg = arg.replace('true', 'True').replace('false', 'False').replace('undef', 'None')
exec('bom.' + arg, locals())
self.assemblies[ass] = bom
def make_name(self, ass):
if self.count == 1:
return ass
return ass.replace("assembly", "assemblies")
def print_bom(self, breakdown, file = None):
if self.vitamins:
print("Vitamins:", file=file)
if breakdown:
longest = 0
for ass in self.assemblies:
name = ass.replace("_assembly","")
longest = max(longest, len(name))
for i in range(longest):
line = ""
for ass in sorted(self.assemblies):
name = ass.replace("_assembly","").replace("_"," ").capitalize()
index = i - (longest - len(name))
if index < 0:
line += " "
else:
line += (" %s " % name[index])
print(line[:-1], file=file)
for part in sorted(self.vitamins):
if ': ' in part:
part_no, description = part.split(': ')
else:
part_no, description = "", part
if breakdown:
for ass in sorted(self.assemblies):
bom = self.assemblies[ass]
if part in bom.vitamins:
file.write("%2d|" % bom.vitamins[part].count)
else:
file.write(" |")
print("%3d" % self.vitamins[part].count, description, file=file)
if self.printed:
if self.vitamins:
print(file=file)
print("Printed:", file=file)
for part in sorted(self.printed):
if breakdown:
for ass in sorted(self.assemblies):
bom = self.assemblies[ass]
if part in bom.printed:
file.write("%2d|" % bom.printed[part].count)
else:
file.write(" |")
print("%3d" % self.printed[part].count, part, file=file)
if self.routed:
print(file=file)
print("CNC cut:", file=file)
for part in sorted(self.routed):
if breakdown:
for ass in sorted(self.assemblies):
bom = self.assemblies[ass]
if part in bom.routed:
file.write("%2d|" % bom.routed[part].count)
else:
file.write(" |")
print("%3d" % self.routed[part].count, part, file=file)
if self.assemblies:
print(file=file)
print("Assemblies:", file=file)
for ass in sorted(self.assemblies):
print("%3d %s" % (self.assemblies[ass].count, self.assemblies[ass].make_name(ass)), file=file)
def parse_bom(file = "openscad.log", name = None):
main = BOM(name)
main.ordered_assemblies = []
stack = []
prog = re.compile(r'^(.*)\((.*)\)$')
for line in open(file):
pos = line.find('ECHO: "~')
if pos > -1:
s = line[pos + 8 : line.rfind('"')]
if s[-1] == '{':
ass = s[:-1]
args = []
match = prog.match(ass) #look for (...)
if match:
ass = match.group(1)
args = match.group(2).split(',')
if stack:
main.assemblies[stack[-1]].add_assembly(ass) #add to nested BOM
stack.append(ass)
main.add_assembly(ass, args) #add to flat BOM
if ass in main.ordered_assemblies:
main.ordered_assemblies.remove(ass)
main.ordered_assemblies.insert(0, ass)
else:
if s[0] == '}':
if s[1:] != stack[-1]:
raise Exception("Mismatched assembly " + s[1:] + str(stack))
stack.pop()
else:
main.add_part(s)
if stack:
main.assemblies[stack[-1]].add_part(s)
else:
if 'ERROR:' in line or 'WARNING:' in line:
raise Exception(line[:-1])
return main
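# Illustrative sketch (not taken from a real OpenSCAD log) of the echo lines that
# parse_bom() above expects; the assembly and part names are assumptions:
#   ECHO: "~main_assembly(big = true){"   -> opens an assembly, with optional args
#   ECHO: "~base.stl(copies = 2)"         -> a printed part, with optional args
#   ECHO: "~M3 cap screw x 10mm"          -> a vitamin
#   ECHO: "~}main_assembly"               -> closes the matching assembly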
def usage():
print("\nusage:\n\tbom [target_config] [<accessory_name>_assembly] - Generate BOMs for a project or an accessory to a project.")
sys.exit(1)
def boms(target = None, assembly = None):
try:
bom_dir = set_config(target, usage) + "bom"
if assembly:
bom_dir += "/accessories"
if not os.path.isdir(bom_dir):
os.makedirs(bom_dir)
else:
assembly = "main_assembly"
if os.path.isdir(bom_dir):
shutil.rmtree(bom_dir)
sleep(0.1)
os.makedirs(bom_dir)
#
# Find the scad file that makes the module
#
scad_file = find_scad_file(assembly)
if not scad_file:
raise Exception("can't find source for " + assembly)
#
# make a file to use the module
#
bom_maker_name = source_dir + "/bom.scad"
with open(bom_maker_name, "w") as f:
f.write("use <%s>\n" % scad_file)
f.write("%s();\n" % assembly);
#
# Run openscad
#
openscad.run("-D", "$bom=2", "-D", "$preview=true", "-o", "openscad.echo", "-d", bom_dir + "/bom.deps", bom_maker_name)
os.remove(bom_maker_name)
print("Generating bom ...", end=" ")
main = parse_bom("openscad.echo", assembly)
if assembly == "main_assembly":
main.print_bom(True, open(bom_dir + "/bom.txt","wt"))
for ass in main.assemblies:
with open(bom_dir + "/" + ass + ".txt", "wt") as f:
bom = main.assemblies[ass]
print(bom.make_name(ass) + ":", file=f)
bom.print_bom(False, f)
data = [main.assemblies[ass].flat_data() for ass in main.ordered_assemblies]
with open(bom_dir + "/bom.json", 'w') as outfile:
json.dump(data, outfile, indent = 4)
print("done")
except Exception as e:
print(str(e))
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) > 3: usage()
if len(sys.argv) == 3:
target, assembly = sys.argv[1], sys.argv[2]
else:
if len(sys.argv) == 2:
if sys.argv[1][-9:] == "_assembly":
target, assembly = None, sys.argv[1]
else:
target, assembly = sys.argv[1], None
else:
target, assembly = None, None
if assembly:
if assembly[-9:] != "_assembly": usage()
boms(target, assembly)
avg_line_length: 35.167785 | max_line_length: 132 | alphanum_fraction: 0.489504

hexsha: 36db47ef1c58b68b89f4cbef735f01acd3170c7a | size: 1,188 | ext: py | lang: Python
max_stars:  path=Analysis_software/marspy/stats.py | name=duderstadt-lab/Born-to-slide | head_hexsha=00f3a7ef4aaefff636ae7ddbb9e1c9947b284967 | licenses=["BSD-2-Clause"] | count=null | event_min=null | event_max=null
max_issues: path=Analysis_software/marspy/stats.py | name=duderstadt-lab/Born-to-slide | head_hexsha=00f3a7ef4aaefff636ae7ddbb9e1c9947b284967 | licenses=["BSD-2-Clause"] | count=null | event_min=null | event_max=null
max_forks:  path=Analysis_software/marspy/stats.py | name=duderstadt-lab/Born-to-slide | head_hexsha=00f3a7ef4aaefff636ae7ddbb9e1c9947b284967 | licenses=["BSD-2-Clause"] | count=null | event_min=null | event_max=null
content:
import numpy as np
from sklearn.utils import resample
def bootstrap(data, n_boot=10000, sample_size=1, estimator=np.mean):
"""
:param data: array with data
:param n_boot: number for bootstrapping iterations (default 10000)
:param sample_size: sample coverage ]0;1] (default 1)
:param estimator: default np.mean
:return: list of bootstrap samples
"""
return estimator(
[resample(data, replace=True, n_samples=int(sample_size * len(data))) for _ in range(n_boot)],
axis=1)
def calc_ci(data, ci=95):
"""
Calculates values for confidence interval
:param data: arrayed data
:param ci: confidence interval (default 95)
:return: lower_bound, upper_bound
"""
return np.percentile(data, 50 - ci / 2), np.percentile(data, 50 + ci / 2)
def significance(p):
"""
Returns significance symbol based on set alpha values
:param p: probability of statistical test
:return: string expression for significance
"""
if p < 0.001:
expression = "***"
elif p < 0.01:
expression = "**"
elif p < 0.05:
expression = "*"
else:
expression = "ns"
return expression
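# A brief usage sketch of the helpers above on synthetic data; the sample values,
# the seed and n_boot are illustrative only.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    sample = rng.normal(loc=1.0, scale=0.2, size=50)
    boot_means = bootstrap(sample, n_boot=1000)   # bootstrap distribution of the mean
    lower, upper = calc_ci(boot_means, ci=95)     # 95% confidence interval bounds
    print(lower, upper, significance(0.003))      # 0.003 < 0.01 -> "**"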
avg_line_length: 27.627907 | max_line_length: 102 | alphanum_fraction: 0.643098

hexsha: 851f1f1ca5d658f8a88669a24c4094e400005ad4 | size: 4,627 | ext: py | lang: Python
max_stars:  path=source/app/simple_cfar_clustering.py | name=WinterWinds-Robotics/pymmw | head_hexsha=3e2841f24a6cd98ccbee10a0ee7479f394417708 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=source/app/simple_cfar_clustering.py | name=WinterWinds-Robotics/pymmw | head_hexsha=3e2841f24a6cd98ccbee10a0ee7479f394417708 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks:  path=source/app/simple_cfar_clustering.py | name=WinterWinds-Robotics/pymmw | head_hexsha=3e2841f24a6cd98ccbee10a0ee7479f394417708 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
#
# Copyright (c) 2019, Manfred Constapel
# This file is licensed under the terms of the MIT license.
#
#
# super-simple CFAR clustering
#
import os
import sys
try:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
__base__ = os.path.dirname(os.path.abspath(__file__))
while 'lib' not in [d for d in os.listdir(__base__) if os.path.isdir(os.path.join(__base__, d))]: __base__ = os.path.join(__base__, '..')
if __base__ not in sys.path: sys.path.append(__base__)
from lib.plot import *
except ImportError as e:
print(e, file=sys.stderr, flush=True)
sys.exit(3)
# ------------------------------------------------
def update(data, threshold=0.1):
if 'detected_points' not in data: return
X, Y, Z = [], [], []
for _, p in data['detected_points'].items():
x, y, z, d = p['x'], p['y'], p['z'], p['v']
X.append(x)
Y.append(y)
Z.append(z)
mx, my, mz, err = np.mean(X), np.mean(Y), np.mean(Z), np.sqrt(np.std(X)**2 + np.std(Y)**2 + np.std(Z)**2)
while err > threshold:
dmin, dmax, d = float('inf'), 0, []
for x, y, z in zip(X, Y, Z):
d.append(np.sqrt(x**2 + y**2 + z**2) - np.sqrt(mx**2 + my**2 + mz**2))
if d[-1] > dmax: dmax = d[-1]
if d[-1] < dmin: dmin = d[-1]
dhor = dmin + (dmax - dmin) / 2
k = 0
for i, r in enumerate(d):
if r > dhor:
d[i] = None
X.pop(i-k)
Y.pop(i-k)
Z.pop(i-k)
k += 1
if len(X) == 0: return
mx, my, mz, err = np.mean(X), np.mean(Y), np.mean(Z), np.sqrt(np.std(X)**2 + np.std(Y)**2 + np.std(Z)**2)
if k == 0 and err > threshold: return
for x, y, z in zip(X, Y, Z):
pt = Point((x, y, z), color=(0.5, 0.5, 0.5), size=3, marker='.')
ax.add_artist(pt)
pt = Point((mx, my, mz), color=(1.0, 0.0, 0.0), size=20, marker='+')
ax.add_artist(pt)
xm, ym, zm = ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()
az, el = ax.azim, ax.elev
if abs(az) > 90: x_ = max(xm)
else: x_ = min(xm)
if az < 0: y_ = max(ym)
else: y_ = min(ym)
if el < 0: z_ = max(zm)
else: z_ = min(zm)
xz = Point((mx, y_, mz), color=(1.0, 0.0, 0.0), size=3, marker='.')
ax.add_artist(xz)
yz = Point((x_, my, mz), color=(1.0, 0.0, 0.0), size=3, marker='.')
ax.add_artist(yz)
xy = Point((mx, my, z_), color=(1.0, 0.0, 0.0), size=3, marker='.')
ax.add_artist(xy)
if __name__ == "__main__":
if len (sys.argv[1:]) != 1:
print('Usage: {} {}'.format(sys.argv[0].split(os.sep)[-1], '<range_maximum>'))
sys.exit(1)
try:
range_max = float(sys.argv[1])
d = range_max # int(math.ceil(range_max))
# ---
fig = plt.figure(figsize=(6, 6))
ax = plt.subplot(1, 1, 1, projection='3d') # rows, cols, idx
ax.view_init(azim=-45, elev=15)
move_figure(fig, (0 + 45*2, 0 + 45*2))
fig.canvas.manager.set_window_title('...')
ax.set_title('CFAR Detection: Simple Clustering'.format(), fontsize=10)
ax.set_xlabel('x [m]')
ax.set_ylabel('y [m]')
ax.set_zlabel('z [m]')
ax.set_xlim3d((-d / 2, +d / 2))
ax.set_ylim3d((0, d))
ax.set_zlim3d((-d / 2, +d / 2))
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
ax.xaxis._axinfo['grid']['linestyle'] = ':'
ax.yaxis._axinfo['grid']['linestyle'] = ':'
ax.zaxis._axinfo['grid']['linestyle'] = ':'
plt.tight_layout(pad=1)
ax.scatter(xs=[], ys=[], zs=[], marker='.', cmap='jet')
for child in ax.get_children():
if isinstance(child, art3d.Path3DCollection):
child.remove()
from itertools import product, combinations # a small cube (origin)
r = [-0.075, +0.075]
for s, e in combinations(np.array(list(product(r,r,r))), 2):
if np.sum(np.abs(s-e)) == r[1]-r[0]:
ax.plot3D(*zip(s,e), color="black", linewidth=0.5)
set_aspect_equal_3d(ax)
mpl.colors._colors_full_map.cache.clear() # avoid memory leak by clearing the cache
start_plot(fig, ax, update, 4)
except Exception as e:
print(e, file=sys.stderr, flush=True)
sys.exit(2)
avg_line_length: 28.213415 | max_line_length: 141 | alphanum_fraction: 0.494489

hexsha: 6983d44e76e06a201faf18d1e3f8e8d0d78ffd34 | size: 572 | ext: py | lang: Python
max_stars:  path=sacred/__about__.py | name=ssudholt/sacred | head_hexsha=6fb05b1ee1b7706b44c3ebd852e1e234841ea2ce | licenses=["MIT"] | count=1 | event_min=2019-06-12T00:31:12.000Z | event_max=2019-06-12T00:31:12.000Z
max_issues: path=sacred/__about__.py | name=ssudholt/sacred | head_hexsha=6fb05b1ee1b7706b44c3ebd852e1e234841ea2ce | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks:  path=sacred/__about__.py | name=ssudholt/sacred | head_hexsha=6fb05b1ee1b7706b44c3ebd852e1e234841ea2ce | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
#!/usr/bin/env python
# coding=utf-8
"""
This module contains meta-information about the Sacred package.
It is kept simple and separate from the main module, because this information
is also read by the setup.py. And during installation the sacred module cannot
yet be imported.
"""
from __future__ import division, print_function, unicode_literals
__all__ = ("__version__", "__author__", "__author_email__", "__url__")
__version__ = "0.7.4-onurgu"
__author__ = 'Klaus Greff'
__author_email__ = 'klaus.greff@startmail.com'
__url__ = "https://github.com/IDSIA/sacred"
avg_line_length: 28.6 | max_line_length: 78 | alphanum_fraction: 0.767483

hexsha: 2734c265a97e11d0eb2ccdba85cda7564e3ad825 | size: 1,878 | ext: py | lang: Python
max_stars:  path=azure-mgmt-monitor/azure/mgmt/monitor/models/rule_email_action_py3.py | name=lmazuel/azure-sdk-for-python | head_hexsha=b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | licenses=["MIT"] | count=1 | event_min=2022-03-30T22:39:15.000Z | event_max=2022-03-30T22:39:15.000Z
max_issues: path=azure-mgmt-monitor/azure/mgmt/monitor/models/rule_email_action_py3.py | name=lmazuel/azure-sdk-for-python | head_hexsha=b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | licenses=["MIT"] | count=54 | event_min=2016-03-25T17:25:01.000Z | event_max=2018-10-22T17:27:54.000Z
max_forks:  path=azure-mgmt-monitor/azure/mgmt/monitor/models/rule_email_action_py3.py | name=lmazuel/azure-sdk-for-python | head_hexsha=b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | licenses=["MIT"] | count=2 | event_min=2017-01-20T18:25:46.000Z | event_max=2017-05-12T21:31:47.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .rule_action import RuleAction
class RuleEmailAction(RuleAction):
"""Specifies the action to send email when the rule condition is evaluated.
The discriminator is always RuleEmailAction in this case.
All required parameters must be populated in order to send to Azure.
:param odatatype: Required. Constant filled by server.
:type odatatype: str
:param send_to_service_owners: Whether the administrators (service and
co-administrators) of the service should be notified when the alert is
activated.
:type send_to_service_owners: bool
:param custom_emails: the list of administrator's custom email addresses
to notify of the activation of the alert.
:type custom_emails: list[str]
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'odatatype': {'key': 'odata\\.type', 'type': 'str'},
'send_to_service_owners': {'key': 'sendToServiceOwners', 'type': 'bool'},
'custom_emails': {'key': 'customEmails', 'type': '[str]'},
}
def __init__(self, *, send_to_service_owners: bool=None, custom_emails=None, **kwargs) -> None:
super(RuleEmailAction, self).__init__(**kwargs)
self.send_to_service_owners = send_to_service_owners
self.custom_emails = custom_emails
self.odatatype = 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction'
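# A hypothetical construction of the action, for illustration only; the alert rule
# and the monitor client that the action would be attached to are not shown here.
action = RuleEmailAction(
    send_to_service_owners=True,
    custom_emails=['first.admin@example.com', 'second.admin@example.com'])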
avg_line_length: 39.957447 | max_line_length: 99 | alphanum_fraction: 0.651225

hexsha: b6235d10ebec1f9027bac36f296e4a6f6b9bc605 | size: 2,739 | ext: py | lang: Python
max_stars:  path=dadvisor/datatypes/container_info.py | name=dadvisor/core | head_hexsha=31d59707eb9bf33f5bea4a8fb6fb1f0de9a37eba | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=dadvisor/datatypes/container_info.py | name=dadvisor/core | head_hexsha=31d59707eb9bf33f5bea4a8fb6fb1f0de9a37eba | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks:  path=dadvisor/datatypes/container_info.py | name=dadvisor/core | head_hexsha=31d59707eb9bf33f5bea4a8fb6fb1f0de9a37eba | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
import json
import subprocess
import time
from prometheus_client import Info
from dadvisor.config import IP
INFO = Info('docker_container', 'Container info', ['hash'])
class ContainerInfo(object):
"""
Creates a ContainerInfo object with several properties.
Note that the ip property is added later (in :func: validate), as Docker
doesn't directly add an IP to the container.
"""
def __init__(self, hash, load):
self.hash = hash
self.created = str(load['Created'])
self.stopped = ''
self.names = load['Names']
self.image = str(load['Image'])
self.ports = load['Ports']
self.ip = ''
INFO.labels(hash=self.hash).info({
'host': IP,
'created': self.created,
'names': ','.join(self.names),
'image': self.image
})
def validate(self):
if self.stopped:
return
for name in self.names:
cmd = 'curl -s --unix-socket /var/run/docker.sock http://localhost/containers{}/json'.format(name)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
data = json.loads(p.communicate()[0].decode('utf-8'))
try:
key = data['State']['Status']
except KeyError:
key = ''
if 'message' in data or key != 'running':
self.stopped = int(time.time())
INFO.labels(hash=self.hash).info({
'host': IP,
'created': self.created,
'names': ','.join(self.names),
'image': self.image,
'stopped': str(self.stopped),
'ip': self.ip
})
return
elif 'NetworkSettings' in data:
if data['NetworkSettings']['IPAddress']:
self.ip = data['NetworkSettings']['IPAddress']
else:
networks = data['NetworkSettings']['Networks']
self.ip = next(iter(networks.values()))['IPAddress']
# TODO: instead of the first network value, take all values
INFO.labels(hash=self.hash).info({
'host': IP,
'created': self.created,
'names': ','.join(self.names),
'image': self.image,
'ip': self.ip
})
def __dict__(self):
return {
'hash': self.hash,
'created': self.created,
'stopped': self.stopped,
'names': self.names,
'ports': self.ports,
'image': self.image,
'ip': self.ip
}
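# A hypothetical usage sketch; the listing dict mimics a single entry of the Docker
# API's /containers/json response and every field value below is illustrative.
if __name__ == '__main__':
    listing = {
        'Created': 1561540800,
        'Names': ['/web'],
        'Image': 'nginx:latest',
        'Ports': [{'PrivatePort': 80, 'Type': 'tcp'}],
    }
    info = ContainerInfo('0123456789ab', listing)
    info.validate()  # queries the Docker socket and fills in info.ip or info.stopped
    print(info.ip, info.stopped)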
avg_line_length: 33.814815 | max_line_length: 110 | alphanum_fraction: 0.490325

hexsha: 6276c9a9fa86d34a675e1e3ba15c8bb55e54480a | size: 2,359 | ext: py | lang: Python
max_stars:  path=take_screenshots.py | name=ClaudeMetz/FactorioScripts | head_hexsha=5aab7569acdf86ff65167584638a3dd7323d2d0b | licenses=["MIT"] | count=2 | event_min=2020-12-26T12:21:33.000Z | event_max=2022-02-06T23:25:10.000Z
max_issues: path=take_screenshots.py | name=ClaudeMetz/FactorioScripts | head_hexsha=5aab7569acdf86ff65167584638a3dd7323d2d0b | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks:  path=take_screenshots.py | name=ClaudeMetz/FactorioScripts | head_hexsha=5aab7569acdf86ff65167584638a3dd7323d2d0b | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
import json
import shutil
import subprocess
import sys
from pathlib import Path
import git # type: ignore
from PIL import Image # type: ignore
# Script config
MODNAME = sys.argv[1]
FACTORIO_PATH = sys.argv[2]
USERDATA_PATH = sys.argv[3]
cwd = Path.cwd()
repo = git.Repo(cwd)
def take_screenshots():
screenshotter_path = cwd / "scenarios" / "screenshotter"
if not screenshotter_path.is_dir():
print("- no screenshotter scenario found, aborting")
return
# Overwrite mod-list.json with the one found in the scenarios folder
current_modlist_path = Path(USERDATA_PATH) / "mods" / "mod-list.json"
current_modlist_path.unlink(missing_ok=True)
shutil.copy(str(screenshotter_path / "mod-list.json"), str(current_modlist_path))
print("- mod-list.json replaced")
# Run the screenshotting scenario, waiting for it to finish
print("- running scenario...", end=" ", flush=True)
subprocess.run([
"/usr/bin/open", "-W", "-a", FACTORIO_PATH, "--args",
"--load-scenario", "{}/screenshotter".format(MODNAME),
"--config", str(screenshotter_path / "config.ini"),
"--instrument-mod", MODNAME # use the same mod as the instrument mod for simplicity
]
)
print("done")
# Crop screenshots according to the given dimensions
script_output_path = Path(USERDATA_PATH, "script-output")
with (script_output_path / "dimensions.json").open("r") as file:
dimensions = json.load(file)
for scene, corners in dimensions.items():
screenshot_path = script_output_path / "{}.png".format(scene)
image = Image.open(screenshot_path)
cropped_img = image.crop((
corners["top_left"]["x"] - 15,
corners["top_left"]["y"] - 15,
corners["bottom_right"]["x"] + 15,
corners["bottom_right"]["y"] + 15
))
cropped_img.save(cwd / "screenshots" / "{}.png".format(scene))
print("- screenshots updated")
# Clean up script output
shutil.rmtree(script_output_path)
print("- script-output removed")
# Commit new screenshots
repo.git.add("-A")
repo.git.commit(m="Update screenshots")
print("- changes committed")
if __name__ == "__main__":
proceed = input(f"[{MODNAME}] Sure to take screenshots? (y/n): ")
if proceed == "y":
take_screenshots()
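# Hypothetical invocation from a mod repository checkout; the mod name and both
# paths below are illustrative and depend entirely on the local setup:
#   python take_screenshots.py mymod /Applications/factorio.app "$HOME/Library/Application Support/factorio"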
avg_line_length: 32.315068 | max_line_length: 92 | alphanum_fraction: 0.647732

hexsha: de192b6a2eebb68253930a5fae2bb136a8fa0d9a | size: 71,252 | ext: py | lang: Python
max_stars:  path=tensor2tensor/utils/t2t_model.py | name=Zhangyantsing/tensor2tensor | head_hexsha=b6abf28a1a903c91eb75d7a102945a780899d6e9 | licenses=["Apache-2.0"] | count=1 | event_min=2018-12-12T18:50:28.000Z | event_max=2018-12-12T18:50:28.000Z
max_issues: path=tensor2tensor/utils/t2t_model.py | name=Zhangyantsing/tensor2tensor | head_hexsha=b6abf28a1a903c91eb75d7a102945a780899d6e9 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_forks:  path=tensor2tensor/utils/t2t_model.py | name=Zhangyantsing/tensor2tensor | head_hexsha=b6abf28a1a903c91eb75d7a102945a780899d6e9 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
content:
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T2TModel Base Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import functools
import math
import time
import six
from tensor2tensor.data_generators import multi_problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators.problem import problem_hparams_to_features
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities # pylint: disable=unused-import
from tensor2tensor.utils import beam_search
from tensor2tensor.utils import decoding
from tensor2tensor.utils import expert_utils as eu
from tensor2tensor.utils import learning_rate
from tensor2tensor.utils import metrics
from tensor2tensor.utils import optimize
from tensor2tensor.utils import quantization
from tensor2tensor.utils import registry
import tensorflow as tf
from tensorflow.python.layers import base
from tensorflow.python.ops import inplace_ops
from tensorflow.python.ops import variable_scope
_no_problem_err_str = (
"The default implementation of %s requires that the "
"model be used with a Problem. If using a Problem, augment the "
"hparams object with trainer_lib.add_problem_hparams. If not, "
"override %s.")
_no_problem_err = (
lambda method_name: _no_problem_err_str % (method_name, method_name))
class T2TModel(base.Layer):
"""Abstract base class for models.
Subclasses generally only need to override `body`.
"""
REGISTERED_NAME = None # Updated on registration.
def __init__(self,
hparams,
mode=tf.estimator.ModeKeys.TRAIN,
problem_hparams=None,
data_parallelism=None,
decode_hparams=None,
**kwargs):
"""Create a T2TModel.
Args:
hparams: tf.contrib.training.HParams, model hyperparameters.
mode: tf.estimator.ModeKeys, the execution mode.
problem_hparams: tf.contrib.training.HParams, hyperparameters for the
Problem. If provided here or in hparams.problem_hparams, the model will
automatically determine bottom, top, and loss methods. If not provided,
calling the model will only invoke body.
data_parallelism: a expert_utils.Parallelism object,
specifies devices for data parallelism.
decode_hparams: a hyperparameter object with decoding parameters.
See decoding.decode_hparams.
**kwargs: arguments to pass to base.Layer constructor.
Returns:
a T2TModel
"""
# Determine name first: use registered name if possible, class name else.
default_name = registry.default_name(type(self))
name = self.REGISTERED_NAME or default_name
super(T2TModel, self).__init__(
trainable=mode == tf.estimator.ModeKeys.TRAIN, name=name, **kwargs)
if not problem_hparams and hasattr(hparams, "problem_hparams"):
problem_hparams = hparams.problem_hparams
self._problem_hparams = problem_hparams
# Setup hparams
# If vocabularies differ, unset shared_embedding_and_softmax_weights.
hparams = copy.copy(hparams)
if self._problem_hparams and hparams.shared_embedding_and_softmax_weights:
same_vocab_sizes = True
if "inputs" in self._problem_hparams.input_modality:
if (self._problem_hparams.input_modality["inputs"] !=
self._problem_hparams.target_modality):
same_vocab_sizes = False
if not same_vocab_sizes:
log_info("Unsetting shared_embedding_and_softmax_weights.")
hparams.shared_embedding_and_softmax_weights = 0
self._original_hparams = hparams
self.set_mode(mode)
self._decode_hparams = copy.copy(decode_hparams or
decoding.decode_hparams())
self._data_parallelism = data_parallelism or eu.Parallelism([""])
self._num_datashards = self._data_parallelism.n
self._ps_devices = self._data_parallelism.ps_devices
self._eager_var_store = create_eager_var_store()
if self._problem_hparams:
self._create_modalities(self._problem_hparams, self._hparams)
if not common_layers.is_xla_compiled():
self.summarize_hparams()
self._variable_scopes = {}
def _add_variable_scope(self, key, vs):
if key not in self._variable_scopes:
self._variable_scopes[key] = vs
def summarize_hparams(self):
def create_hparams_summary(hparams, name):
hparams_strs = [tf.convert_to_tensor([k, str(v)])
for k, v in hparams.values().items()]
tf.summary.text(name, tf.stack(hparams_strs))
create_hparams_summary(self._hparams, "%s_hparams" % self.name)
if self._problem_hparams:
create_hparams_summary(self._problem_hparams,
"%s_problem_hparams" % self.name)
# Replace the two methods below in order to add custom SessionRunHooks to
# the training procedure.
@staticmethod
def train_hooks():
return []
@staticmethod
def eval_hooks():
return []
@property
def hparams(self):
return self._hparams
@property
def is_training(self):
return self._hparams.mode == tf.estimator.ModeKeys.TRAIN
@property
def has_input(self):
if self._problem_hparams:
return "inputs" in self._problem_hparams.input_modality
else:
return True
@property
def _custom_getter(self):
if self.hparams.weight_dtype == "bfloat16":
if self.hparams.optimizer != "Adafactor":
raise NotImplementedError(
"weight_dtype=bfloat16 only implemented with Adafactor optimizer")
return quantization.EighthPowerEncoding().custom_getter(
activation_dtype=tf.bfloat16
if self.hparams.activation_dtype == "bfloat16" else tf.float32)
elif self.hparams.activation_dtype == "bfloat16":
return quantization.bfloat16_activations_var_getter
else:
return None
@property
def _target_modality_is_real(self):
"""Whether the target modality is real-valued."""
target_modality = self._problem_hparams.target_modality
return target_modality.name.startswith("real_")
def call(self, inputs, **kwargs):
del kwargs
features = inputs
set_custom_getter_compose(self._custom_getter)
tf.get_variable_scope().set_initializer(
optimize.get_variable_initializer(self.hparams))
with self._eager_var_store.as_default():
self._fill_problem_hparams_features(features)
sharded_features = self._shard_features(features)
sharded_logits, losses = self.model_fn_sharded(sharded_features)
if isinstance(sharded_logits, dict):
concat_logits = {}
for k, v in six.iteritems(sharded_logits):
concat_logits[k] = tf.concat(v, 0)
return concat_logits, losses
else:
return tf.concat(sharded_logits, 0), losses
@property
def use_body_sharded(self):
return False
def body_sharded(self, sharded_features):
raise NotImplementedError("Models that wish to manually control sharding, "
"e.g. MoE models, should override body_sharded "
"and set use_body_sharded to True.")
def model_fn_sharded(self, sharded_features):
dp = self._data_parallelism
summarize_features(sharded_features, num_shards=dp.n)
datashard_to_features = self._to_features_per_datashard(sharded_features)
if self.use_body_sharded:
# MoE models override body_sharded
transformed_features = dp(self.bottom, datashard_to_features)
body_out = self.body_sharded(
self._to_single_features_dict(transformed_features))
body_out, losses = self._normalize_body_output(body_out)
if "training" in losses:
log_info("Skipping T2TModel top and loss because training loss "
"returned from body")
sharded_logits = body_out
else:
if isinstance(body_out, dict):
sharded_logits = collections.OrderedDict()
sharded_losses = collections.OrderedDict()
for k, v in sorted(six.iteritems(body_out)):
sharded_logits[k] = dp(self.top, v, datashard_to_features)
sharded_losses[k] = dp(self.loss, sharded_logits[k],
datashard_to_features)
training_loss_dict = average_sharded_losses([{
"training": l
} for loss in sharded_losses.values() for l in loss])
losses.update(training_loss_dict)
else:
sharded_logits = dp(self.top, body_out, datashard_to_features)
sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)
if isinstance(sharded_losses, tuple):
nums, dens = sharded_losses
sharded_losses = zip(nums, dens)
training_loss_dict = average_sharded_losses([{
"training": loss
} for loss in sharded_losses])
losses.update(training_loss_dict)
else:
sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)
if isinstance(sharded_logits[0], dict):
temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
for k, _ in six.iteritems(sharded_logits[0]):
for l in sharded_logits:
temp_dict[k].append(l[k])
sharded_logits = temp_dict
losses = average_sharded_losses(sharded_losses)
# TODO(rsepassi): Reenable scheduled sampling
# Disabled because of model_fn_sharded refactor
#
# do_scheduled_sampling = ( # Only do it if training and set for it.
# self.hparams.scheduled_sampling_prob > 0.0 and
# self.hparams.mode == tf.estimator.ModeKeys.TRAIN)
# if do_scheduled_sampling:
# sharded_logits, losses = scheduled_sampling(
# self.hparams, self._problem_hparams, dp,
# sharded_logits, losses, sharded_features,
# transformed_features, self)
return sharded_logits, losses
def model_fn(self, features):
with tf.variable_scope(tf.get_variable_scope(), use_resource=True) as vs:
self._add_variable_scope("model_fn", vs)
transformed_features = self.bottom(features)
if self.hparams.activation_dtype == "bfloat16":
for k, v in sorted(six.iteritems(transformed_features)):
if v.dtype == tf.float32:
transformed_features[k] = tf.cast(v, tf.bfloat16)
with tf.variable_scope("body") as body_vs:
self._add_variable_scope("body", body_vs)
log_info("Building model body")
body_out = self.body(transformed_features)
output, losses = self._normalize_body_output(body_out)
if "training" in losses:
log_info("Skipping T2TModel top and loss because training loss "
"returned from body")
logits = output
else:
logits = self.top(output, features)
losses["training"] = 0.0
if (self._hparams.mode != tf.estimator.ModeKeys.PREDICT and
self._hparams.mode != "attack"):
losses["training"] = self.loss(logits, features)
return logits, losses
def bottom(self, features):
"""Transform features to feed into body."""
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
return features
transformed_features = collections.OrderedDict()
all_previous_modalities = []
# Transform the input features
for key, input_modality in sorted(
six.iteritems(self._problem_hparams.input_modality)):
if key not in features:
tf.logging.warning("Missing feature %s - ignoring." % key)
continue
do_reuse = input_modality.name in all_previous_modalities
with tf.variable_scope(input_modality.name, reuse=do_reuse) as im_vs:
self._add_variable_scope(input_modality.name, im_vs)
log_info("Transforming feature '%s' with %s.bottom", key,
input_modality.name)
transformed_features[key] = input_modality.bottom(features[key])
all_previous_modalities.append(input_modality.name)
# Transform the targets (for autoregressive models)
target_modality = self._problem_hparams.target_modality
if isinstance(target_modality, dict):
for k, v in six.iteritems(target_modality):
if k in features:
# TODO(aidangomez): share variables?
with tf.variable_scope("%s/%s" % (v.name, k)) as tm_vs:
self._add_variable_scope("%s/%s" % (v.name, k), tm_vs)
log_info("Transforming '%s' with %s.targets_bottom", k, v.name)
transformed_features[k] = v.targets_bottom(features[k])
else:
tf.logging.warn("Modality not found in features: %s", k)
else:
with tf.variable_scope(target_modality.name) as tm_vs:
self._add_variable_scope(target_modality.name, tm_vs)
if "targets" in features:
log_info("Transforming 'targets' with %s.targets_bottom",
target_modality.name)
transformed_features["targets"] = target_modality.targets_bottom(
features["targets"])
for key in features:
if key not in transformed_features:
# For features without a modality, we pass them along as is
transformed_features[key] = features[key]
else:
# Other features get passed along with the "raw" suffix
transformed_features[key + "_raw"] = features[key]
return transformed_features
def body(self, features):
"""Most models will override this function.
Compute label logits for one shard as a function of the transformed
features.
Args:
features: A dictionary of key to Tensor. Each Tensor has shape
[batch_size, ?, ?, hidden_size].
Returns:
output: tensor of logits with shape [batch_size, O, P, body_output_size].
losses: either single loss as a scalar, a list, a tensor (to be averaged)
or a dictionary of losses.
"""
raise NotImplementedError("Abstract Method")
def _top_single(self, body_output, target_modality, features):
if not target_modality:
log_warn("Without a Problem, T2TModel.top is a passthrough.")
return body_output
with tf.variable_scope(target_modality.name) as tm_vs:
self._add_variable_scope(tm_vs.name, tm_vs)
log_info("Transforming body output with %s.top", target_modality.name)
last_only = (
target_modality.top_is_pointwise and
self.hparams.mode == tf.estimator.ModeKeys.PREDICT and
not self.hparams.force_full_predict)
if not last_only:
logits = target_modality.top(body_output, features.get("targets"))
else:
# Take body outputs for the last position only, and targets too.
if "decode_loop_step" not in features:
last_position_body_output = tf.expand_dims(
body_output[:, -1, :, :], axis=[1])
last_position_targets = tf.expand_dims(
features["targets"][:, -1:, :, :], axis=[1])
else:
body_output_shape = body_output.shape.as_list()
last_position_body_output = tf.slice(
body_output, [0, features["decode_loop_step"][0], 0, 0], [
body_output_shape[0], 1, body_output_shape[2],
body_output_shape[3]
])
target_shape = features["targets"].shape.as_list()
last_position_targets = tf.slice(
features["targets"], [0, features["decode_loop_step"][0], 0, 0],
[target_shape[0], 1, target_shape[2], target_shape[3]])
logits = target_modality.top(last_position_body_output,
last_position_targets)
return logits
def top(self, body_output, features):
if isinstance(body_output, dict):
if self._problem_hparams:
target_modality = self._problem_hparams.target_modality
else:
target_modality = {k: None for k in body_output.keys()}
for k in body_output.keys():
assert k in target_modality.keys(), (
"The key %s of model_body's returned logits dict must be in "
"problem_hparams.target_modality's dict." % k)
logits = {}
for k, v in six.iteritems(body_output):
# TODO(aidangomez): share variables here?
with tf.variable_scope(k) as top_vs:
self._add_variable_scope("top_%s" % k, top_vs)
logits[k] = self._top_single(v, target_modality[k], features)
return logits
else:
if self._problem_hparams:
target_modality = self._problem_hparams.target_modality
else:
target_modality = None
if isinstance(target_modality, dict):
assert "targets" in target_modality, (
"model_body returned single logits so 'targets' must be a key "
"since problem_hparams.target_modality is a dict.")
target_modality = target_modality["targets"]
return self._top_single(body_output, target_modality, features)
def _loss_single(self, logits, target_modality, feature):
# The current bfloat16 version still uses float32 for most parts of backward
# propagation to keep model quality, so cast back before computing the loss
# value.
if not target_modality:
log_warn(_no_problem_err("loss"))
return (tf.constant(0., dtype=tf.float32),
tf.constant(1., dtype=tf.float32))
loss_num, loss_den = target_modality.loss(logits, feature)
loss_num *= self._problem_hparams.loss_multiplier
if hasattr(self.hparams, "problem") and hasattr(
self.hparams.problem, "task_list"):
loss_num, loss_den, summaries = multi_problem.aggregate_task_losses(
self.hparams,
self._problem_hparams,
logits,
target_modality,
feature
)
for key, val in summaries:
tf.summary.scalar(key, val)
return loss_num, loss_den
def loss(self, logits, features):
if isinstance(logits, dict):
if self._problem_hparams:
target_modality = self._problem_hparams.target_modality
else:
target_modality = {k: None for k in logits.keys()}
for k in logits.keys():
assert k in target_modality.keys(), (
"The key %s of model_body's returned logits dict must be in "
"problem_hparams.target_modality's dict." % k)
losses = {}
for k, v in six.iteritems(logits):
losses[k] = self._loss_single(v, target_modality[k], features[k])
n, d = losses[k]
if common_layers.should_generate_summaries():
tf.summary.scalar(k + "_loss", n / d)
tf.summary.scalar(k + "_loss_num", n)
tf.summary.scalar(k + "_loss_den", d)
return tf.add_n([n / d for n, d in losses.values()])
else:
if self._problem_hparams:
target_modality = self._problem_hparams.target_modality
else:
target_modality = None
if isinstance(target_modality, dict):
assert "targets" in target_modality, (
"model_body returned single logits so 'targets' must be a key "
"since problem_hparams.target_modality is a dict.")
target_modality = target_modality["targets"]
return self._loss_single(logits, target_modality, features["targets"])
def optimize(self, loss, num_async_replicas=1, use_tpu=False):
"""Return a training op minimizing loss."""
lr = learning_rate.learning_rate_schedule(self.hparams)
if num_async_replicas > 1:
log_info("Dividing learning rate by num_async_replicas: %d",
num_async_replicas)
lr /= math.sqrt(float(num_async_replicas))
train_op = optimize.optimize(loss, lr, self.hparams, use_tpu=use_tpu)
return train_op
def set_mode(self, mode):
"""Set hparams with the given mode."""
log_info("Setting T2TModel mode to '%s'", mode)
hparams = copy.copy(self._original_hparams)
hparams.add_hparam("mode", mode)
# When not in training mode, set all forms of dropout to zero.
if mode != tf.estimator.ModeKeys.TRAIN:
for key in hparams.values():
if key.endswith("dropout") or key == "label_smoothing":
log_info("Setting hparams.%s to 0.0", key)
setattr(hparams, key, 0.0)
self._hparams = hparams
def _create_modalities(self, problem_hparams, hparams):
"""Construct modalities in problem_hparams."""
input_modality_overrides = {}
for override_str in hparams.input_modalities.split(";"):
if override_str != "default":
parts = override_str.split(":")
feature_name = parts[0]
modality_name = ":".join(parts[1:])
input_modality_overrides[feature_name] = modality_name
target_modality_name = None
if hparams.target_modality and hparams.target_modality != "default":
target_modality_name = hparams.target_modality
input_modality = {}
for f, modality_spec in six.iteritems(problem_hparams.input_modality):
if f in input_modality_overrides:
_warn_changed_modality_type(input_modality_overrides[f],
modality_spec[0], f)
modality_spec = (input_modality_overrides[f], modality_spec[1])
input_modality[f] = registry.create_modality(modality_spec, hparams)
problem_hparams.input_modality = input_modality
if isinstance(problem_hparams.target_modality, dict):
target_modality = {}
for f, modality_spec in six.iteritems(problem_hparams.target_modality):
# TODO(lukaszkaiser): allow overriding other target modalities.
if target_modality_name and f == "targets":
_warn_changed_modality_type(target_modality_name, modality_spec[0],
"target_modality/%s" % f)
modality_spec = (target_modality_name, modality_spec[1])
target_modality[f] = registry.create_modality(modality_spec, hparams)
else:
target_modality_spec = problem_hparams.target_modality
if target_modality_name:
_warn_changed_modality_type(target_modality_name,
target_modality_spec[0], "target")
target_modality_spec = (target_modality_name, target_modality_spec[1])
target_modality = registry.create_modality(target_modality_spec, hparams)
problem_hparams.target_modality = target_modality
def prepare_features_for_infer(self, features):
"""Called before inference to allow adding infer-specific features."""
pass
def eval_autoregressive(self, features=None, decode_length=50):
"""Autoregressive eval.
Quadratic time in decode_length.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training".
"""
results = self._slow_greedy_infer(features, decode_length=decode_length)
return results["logits"], results["losses"]
def _fill_problem_hparams_features(self, features):
if features is not None:
for k, v in sorted(
six.iteritems(problem_hparams_to_features(self._problem_hparams))):
if k not in features:
features[k] = tf.constant(v, name=k)
def infer(self,
features=None,
decode_length=50,
beam_size=1,
top_beams=1,
alpha=0.0,
use_tpu=False):
"""A inference method.
Quadratic time in decode_length.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. The larger the alpha, the
stronger the preference for longer translations.
use_tpu: bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
if slow greedy decoding is used then the dict will also contain {
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`
}
"""
set_custom_getter_compose(self._custom_getter)
with self._eager_var_store.as_default():
# TODO(rsepassi): Make decoding work with real-valued model outputs
# (i.e. if the target modality is RealModality).
self.prepare_features_for_infer(features)
if not self.has_input and beam_size > 1:
log_warn("Beam searching for a model with no inputs.")
if not self.has_input and self.hparams.sampling_method != "random":
log_warn("Non-random sampling for a model with no inputs.")
self._fill_problem_hparams_features(features)
if self._problem_hparams:
target_modality = self._problem_hparams.target_modality
if target_modality.is_class_modality:
beam_size = 1 # No use to run beam-search for a single class.
if beam_size == 1:
log_info("Greedy Decoding")
results = self._greedy_infer(features, decode_length, use_tpu)
else:
log_info("Beam Decoding with beam size %d" % beam_size)
results = self._beam_decode(features, decode_length, beam_size,
top_beams, alpha)
return results
def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha):
"""Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. The larger the alpha, the
stronger the preference for longer translations.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
return self._beam_decode_slow(features, decode_length, beam_size, top_beams,
alpha)
def _beam_decode_slow(self, features, decode_length, beam_size, top_beams,
alpha):
"""Slow version of Beam search decoding.
Quadratic time in decode_length.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. The larger the alpha, the
stronger the preference for longer translations.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
batch_size = common_layers.shape_list(features["inputs"])[0]
def symbols_to_logits_fn(ids):
"""Go from ids to logits."""
ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
if "partial_targets" in features:
pt = features["partial_targets"]
pt_length = common_layers.shape_list(pt)[1]
pt = tf.tile(pt, [1, beam_size])
pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1])
ids = tf.concat([pt, ids], axis=1)
features["targets"] = ids
self._coverage = None
logits, _ = self(features) # pylint: disable=not-callable
# now self._coverage is a coverage tensor for the first datashard.
# it has shape [batch_size] and contains floats between 0 and
# source_length.
if self._problem_hparams:
modality = self._problem_hparams.target_modality
if modality.top_is_pointwise:
return tf.squeeze(logits, axis=[1, 2, 3])
# -1 due to the pad above.
current_output_position = common_layers.shape_list(ids)[1] - 1
logits = logits[:, current_output_position, :, :]
return tf.squeeze(logits, axis=[1, 2])
initial_ids = tf.zeros([batch_size], dtype=tf.int32)
if self.has_input:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 1)
if len(features["inputs"].shape) < 5:
features["inputs"] = tf.expand_dims(features["inputs"], 4)
# Expand the inputs in to the beam size.
features["inputs"] = tf.tile(features["inputs"], [1, beam_size, 1, 1, 1])
s = common_layers.shape_list(features["inputs"])
features["inputs"] = tf.reshape(features["inputs"],
[s[0] * s[1], s[2], s[3], s[4]])
target_modality = self._problem_hparams.target_modality
vocab_size = target_modality.top_dimensionality
# Setting decode length to input length + decode_length
decode_length = tf.constant(decode_length)
if "partial_targets" not in features:
inputs = features["inputs"]
decode_length = (common_layers.shape_list(inputs)[1] +
features.get("decode_length", decode_length))
ids, scores = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
stop_early=(top_beams == 1))
# Set inputs back to the unexpanded inputs to not to confuse the Estimator!
if self.has_input:
features["inputs"] = inputs_old
# Return `top_beams` decodings (also remove initial id from the beam search)
# TODO(lukaszkaiser): make it work multi-problem.
if top_beams == 1:
samples = ids[:, 0, 1:]
else:
samples = ids[:, :top_beams, 1:]
return {"outputs": samples, "scores": scores}
def _greedy_infer(self, features, decode_length, use_tpu=False):
"""A greedy inference method.
Models should ideally implement a more efficient version of this function.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
return (self._slow_greedy_infer_tpu(features, decode_length)
if use_tpu else self._slow_greedy_infer(features, decode_length))
def _slow_greedy_infer_tpu(self, features, decode_length):
"""A slow greedy inference method on TPU.
Quadratic time in decode_length.
Args:
features: A map of string to `Tensor`.
decode_length: An integer, how many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if not features:
features = {}
inputs_old = None
if "inputs" in features and len(features["inputs"].shape) < 4:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 2)
if not self.has_input:
# Prepare partial targets.
# In either features["inputs"] or features["targets"].
# We force the outputs to begin with these sequences.
partial_targets = features.get("inputs")
if partial_targets is None:
partial_targets = features["targets"]
features["partial_targets"] = tf.to_int64(partial_targets)
# Save the targets in a var and reassign it after the tf.while loop to avoid
# having targets being in a 'while' frame. This ensures targets when used
# in metric functions stays in the same frame as other vars.
targets_old = features.get("targets", None)
target_modality = self._problem_hparams.target_modality
def infer_step(i, recent_output, recent_logits, unused_loss):
"""Inference step."""
if not tf.contrib.eager.in_eager_mode():
recent_output.set_shape([None, None, None, 1])
padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
features["targets"] = padded
# This is inefficient in that it generates samples at all timesteps,
# not just the last one, except if target_modality is pointwise.
features["decode_loop_step"] = i
samples, logits, losses = self.sample(features)
# Concatenate the already-generated recent_output with last timestep
# of the newly-generated samples.
if target_modality.top_is_pointwise:
cur_sample = samples[:, -1, :, :]
else:
cur_sample = samples[:, i, :, :]
samples = tf.transpose(recent_output, perm=[1, 0, 2, 3])
samples = inplace_ops.alias_inplace_update(samples, i,
tf.to_int64(cur_sample))
samples = tf.transpose(samples, perm=[1, 0, 2, 3])
if not tf.contrib.eager.in_eager_mode():
samples.set_shape([None, None, None, 1])
# Assuming we have one shard for logits.
recent_logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4])
recent_logits = inplace_ops.alias_inplace_update(
recent_logits, i, tf.squeeze(logits[:, -1:], axis=1))
logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4])
loss = sum([l for l in losses.values() if l is not None])
return i + 1, samples, logits, loss
# Create an initial output tensor. This will be passed
# to the infer_step, which adds one timestep at every iteration.
if "partial_targets" in features:
initial_output = tf.to_int64(features["partial_targets"])
while len(initial_output.get_shape().as_list()) < 4:
initial_output = tf.expand_dims(initial_output, 2)
batch_size = common_layers.shape_list(initial_output)[0]
else:
batch_size = common_layers.shape_list(features["inputs"])[0]
initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)
# Hack: foldl complains when the output shape is less specified than the
# input shape, so we confuse it about the input shape.
initial_output = tf.slice(initial_output, [0, 0, 0, 0],
common_layers.shape_list(initial_output))
target_modality = self._problem_hparams.target_modality
if target_modality.is_class_modality:
decode_length = 1
else:
if "partial_targets" in features:
prefix_length = common_layers.shape_list(features["partial_targets"])[1]
else:
prefix_length = common_layers.shape_list(features["inputs"])[1]
decode_length = prefix_length + decode_length
# Initial values of result, logits and loss.
result = tf.concat(
[initial_output,
tf.zeros([batch_size, decode_length, 1, 1], tf.int64)],
axis=1)
# tensor padded to [batch_size, decode_length, 1, 1, vocab_size]
logits = tf.zeros((batch_size, decode_length, 1, 1,
target_modality.top_dimensionality))
if not tf.contrib.eager.in_eager_mode():
logits.set_shape([None, None, None, None, None])
loss = 0.0
def while_exit_cond(i, result, logits, loss): # pylint: disable=unused-argument
"""Exit the loop either if reach decode_length or EOS."""
not_overflow = i < decode_length
if self._problem_hparams.stop_at_eos:
def fn_not_eos():
# Check if the last predicted element is a EOS
return tf.reduce_any(
tf.not_equal(
tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID))
not_eos = tf.cond(
# We only check for early stopping if there is at least 1 element (
# otherwise not_eos will crash).
tf.not_equal(i, 0),
fn_not_eos,
lambda: True,
)
return tf.cond(
tf.equal(batch_size, 1),
# If batch_size == 1, we check EOS for early stopping.
lambda: tf.logical_and(not_overflow, not_eos),
# Else, just wait for max length
lambda: not_overflow)
return not_overflow
_, result, logits, loss = tf.while_loop(
while_exit_cond,
infer_step, [tf.constant(0), result, logits, loss],
shape_invariants=[
tf.TensorShape([]),
tf.TensorShape([batch_size, decode_length, 1, 1]),
tf.TensorShape([
batch_size, decode_length, 1, 1,
target_modality.top_dimensionality
]),
tf.TensorShape([]),
],
back_prop=False,
parallel_iterations=1)
if inputs_old is not None: # Restore to not confuse Estimator.
features["inputs"] = inputs_old
# Reassign targets back to the previous value.
if targets_old is not None:
features["targets"] = targets_old
losses = {"training": loss}
if "partial_targets" in features:
partial_target_length = common_layers.shape_list(
features["partial_targets"])[1]
result = tf.slice(result, [0, partial_target_length, 0, 0],
[-1, -1, -1, -1])
return {
"outputs": result,
"scores": None,
"logits": logits,
"losses": losses,
}
def _slow_greedy_infer(self, features, decode_length):
"""A slow greedy inference method.
Quadratic time in decode_length.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if not features:
features = {}
inputs_old = None
if "inputs" in features and len(features["inputs"].shape) < 4:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 2)
if not self.has_input:
# Prepare partial targets.
# In either features["inputs"] or features["targets"].
# We force the outputs to begin with these sequences.
partial_targets = features.get("inputs")
if partial_targets is None:
partial_targets = features["targets"]
features["partial_targets"] = tf.to_int64(partial_targets)
    # Save the targets in a var and reassign it after the tf.while loop to avoid
    # having targets be in a 'while' frame. This ensures that targets, when used
    # in metric functions, stay in the same frame as other vars.
targets_old = features.get("targets", None)
target_modality = self._problem_hparams.target_modality
def infer_step(recent_output, recent_logits, unused_loss):
"""Inference step."""
if not tf.contrib.eager.in_eager_mode():
if self._target_modality_is_real:
dim = self._problem_hparams.target_modality.top_dimensionality
recent_output.set_shape([None, None, None, dim])
else:
recent_output.set_shape([None, None, None, 1])
padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
features["targets"] = padded
# This is inefficient in that it generates samples at all timesteps,
# not just the last one, except if target_modality is pointwise.
samples, logits, losses = self.sample(features)
# Concatenate the already-generated recent_output with last timestep
# of the newly-generated samples.
if target_modality.top_is_pointwise:
cur_sample = samples[:, -1, :, :]
else:
cur_sample = samples[:,
common_layers.shape_list(recent_output)[1], :, :]
if self._target_modality_is_real:
cur_sample = tf.expand_dims(cur_sample, axis=1)
samples = tf.concat([recent_output, cur_sample], axis=1)
else:
cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1))
samples = tf.concat([recent_output, cur_sample], axis=1)
if not tf.contrib.eager.in_eager_mode():
samples.set_shape([None, None, None, 1])
# Assuming we have one shard for logits.
logits = tf.concat([recent_logits, logits[:, -1:]], 1)
loss = sum([l for l in losses.values() if l is not None])
return samples, logits, loss
# Create an initial output tensor. This will be passed
# to the infer_step, which adds one timestep at every iteration.
if "partial_targets" in features:
initial_output = tf.to_int64(features["partial_targets"])
while len(initial_output.get_shape().as_list()) < 4:
initial_output = tf.expand_dims(initial_output, 2)
batch_size = common_layers.shape_list(initial_output)[0]
else:
batch_size = common_layers.shape_list(features["inputs"])[0]
if self._target_modality_is_real:
dim = self._problem_hparams.target_modality.top_dimensionality
initial_output = tf.zeros((batch_size, 0, 1, dim), dtype=tf.float32)
else:
initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)
# Hack: foldl complains when the output shape is less specified than the
# input shape, so we confuse it about the input shape.
initial_output = tf.slice(initial_output, [0, 0, 0, 0],
common_layers.shape_list(initial_output))
target_modality = self._problem_hparams.target_modality
if target_modality.is_class_modality:
decode_length = 1
else:
if "partial_targets" in features:
prefix_length = common_layers.shape_list(features["partial_targets"])[1]
else:
prefix_length = common_layers.shape_list(features["inputs"])[1]
decode_length = prefix_length + decode_length
# Initial values of result, logits and loss.
result = initial_output
if self._target_modality_is_real:
logits = tf.zeros((batch_size, 0, 1, target_modality.top_dimensionality))
logits_shape_inv = [None, None, None, None]
else:
# tensor of shape [batch_size, time, 1, 1, vocab_size]
logits = tf.zeros((batch_size, 0, 1, 1,
target_modality.top_dimensionality))
logits_shape_inv = [None, None, None, None, None]
if not tf.contrib.eager.in_eager_mode():
logits.set_shape(logits_shape_inv)
loss = 0.0
def while_exit_cond(result, logits, loss): # pylint: disable=unused-argument
"""Exit the loop either if reach decode_length or EOS."""
length = common_layers.shape_list(result)[1]
not_overflow = length < decode_length
if self._problem_hparams.stop_at_eos:
def fn_not_eos():
        return tf.not_equal(  # Check if the last predicted element is an EOS
tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID)
not_eos = tf.cond(
# We only check for early stopping if there is at least 1 element (
# otherwise not_eos will crash).
tf.not_equal(length, 0),
fn_not_eos,
lambda: True,
)
return tf.cond(
tf.equal(batch_size, 1),
# If batch_size == 1, we check EOS for early stopping.
lambda: tf.logical_and(not_overflow, not_eos),
# Else, just wait for max length
lambda: not_overflow)
return not_overflow
result, logits, loss = tf.while_loop(
while_exit_cond,
infer_step, [result, logits, loss],
shape_invariants=[
tf.TensorShape([None, None, None, None]),
tf.TensorShape(logits_shape_inv),
tf.TensorShape([]),
],
back_prop=False,
parallel_iterations=1)
if inputs_old is not None: # Restore to not confuse Estimator.
features["inputs"] = inputs_old
# Reassign targets back to the previous value.
if targets_old is not None:
features["targets"] = targets_old
losses = {"training": loss}
if "partial_targets" in features:
partial_target_length = common_layers.shape_list(
features["partial_targets"])[1]
result = tf.slice(result, [0, partial_target_length, 0, 0],
[-1, -1, -1, -1])
return {
"outputs": result,
"scores": None,
"logits": logits,
"losses": losses,
}
def sample(self, features):
"""Run the model and extract samples.
Args:
      features: a map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
logits, losses = self(features) # pylint: disable=not-callable
if self._target_modality_is_real:
return logits, logits, losses # Raw numbers returned from real modality.
if self.hparams.sampling_method == "argmax":
samples = tf.argmax(logits, axis=-1)
else:
assert self.hparams.sampling_method == "random"
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
reshaped_logits = (
tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return samples, logits, losses
def _shard_features(self, features): # pylint: disable=missing-docstring
sharded_features = dict()
for k, v in sorted(six.iteritems(features)):
v = tf.convert_to_tensor(v)
v_shape = common_layers.shape_list(v)
if not v_shape:
v = tf.expand_dims(v, axis=-1)
v_shape = [1]
if v_shape == [1]:
v = tf.tile(v, tf.to_int32([self._num_datashards]))
sharded_features[k] = self._data_parallelism(
tf.identity, tf.split(v, self._num_datashards, 0))
return sharded_features
def _to_features_per_datashard(self, features):
datashard_features = []
assert len(features[list(features.keys())[0]]) == self._num_datashards
for d in range(self._num_datashards):
f = {k: v[d] for k, v in six.iteritems(features)}
datashard_features.append(f)
return datashard_features
def _to_single_features_dict(self, datashard_features):
assert len(datashard_features) == self._num_datashards
features = collections.defaultdict(list)
for feats in datashard_features:
for k, v in six.iteritems(feats):
features[k].append(v)
return features
@staticmethod
def get_train_hooks(model_name):
model_cls = registry.model(model_name)
return model_cls.train_hooks()
@staticmethod
def get_eval_hooks(model_name):
model_cls = registry.model(model_name)
return model_cls.eval_hooks()
@staticmethod
def make_estimator_model_fn(model_name,
hparams,
decode_hparams=None):
model_cls = registry.model(model_name)
def wrapping_model_fn(features, labels, mode, params=None, config=None):
return model_cls.estimator_model_fn(
hparams,
features,
labels,
mode,
config=config,
params=params,
decode_hparams=decode_hparams)
return wrapping_model_fn
@classmethod
def estimator_model_fn(cls,
hparams,
features,
labels,
mode,
config=None,
params=None,
decode_hparams=None):
"""Model fn for Estimator.
Args:
hparams: HParams, model hyperparameters
features: dict<str name, Tensor feature>
labels: Tensor
mode: tf.estimator.ModeKeys
config: RunConfig, possibly with data_parallelism attribute
params: dict, may include batch_size, use_tpu
decode_hparams: HParams, used when mode == PREDICT.
Returns:
TPUEstimatorSpec if use tpu else EstimatorSpec
"""
if mode == tf.estimator.ModeKeys.TRAIN:
_create_dummy_vars()
hparams = copy.deepcopy(hparams)
use_tpu = params and params.get("use_tpu", False)
# Instantiate model
data_parallelism = None
if not use_tpu and config:
data_parallelism = config.data_parallelism
reuse = tf.get_variable_scope().reuse
model = cls(
hparams,
mode,
data_parallelism=data_parallelism,
decode_hparams=decode_hparams,
_reuse=reuse)
# PREDICT mode
if mode == tf.estimator.ModeKeys.PREDICT:
return model.estimator_spec_predict(features, use_tpu=use_tpu)
# TRAIN and EVAL modes
if hparams.eval_run_autoregressive and mode == tf.estimator.ModeKeys.EVAL:
logits, losses_dict = model.eval_autoregressive(features)
else:
logits, losses_dict = model(features) # pylint: disable=not-callable
# Set known shapes
if common_layers.is_xla_compiled():
if isinstance(logits, dict):
for k, v in sorted(six.iteritems(logits)):
if "scalar/" in k:
continue
shape = v.get_shape().as_list()
if shape[0] is None:
shape[0] = params["batch_size"]
if shape[1] is None:
shape[1] = hparams.max_length
v.set_shape(shape)
else:
shape = logits.get_shape().as_list()
if shape[0] is None:
shape[0] = params["batch_size"]
if shape[1] is None:
shape[1] = hparams.max_length
logits.set_shape(shape)
assert "training" in losses_dict
# Attack mode
if mode == "attack":
return logits
# Summarize losses
if common_layers.should_generate_summaries():
with tf.name_scope("losses"):
for loss_name, loss_val in sorted(losses_dict.items()):
tf.summary.scalar(loss_name, loss_val)
# Accumulate losses
loss = sum(losses_dict[key] for key in sorted(losses_dict.keys()))
# EVAL mode
if mode == tf.estimator.ModeKeys.EVAL:
return model.estimator_spec_eval(features, logits, labels, loss,
losses_dict)
# TRAIN mode
assert mode == tf.estimator.ModeKeys.TRAIN
num_async_replicas = (1 if (use_tpu or not config) else
config.t2t_device_info["num_async_replicas"])
return model.estimator_spec_train(
loss, num_async_replicas=num_async_replicas, use_tpu=use_tpu)
def initialize_from_ckpt(self, ckpt_dir):
model_dir = self._hparams.get("model_dir", None)
already_has_ckpt = (
model_dir and tf.train.latest_checkpoint(model_dir) is not None)
if already_has_ckpt:
return
# TODO(mitchellstern): Add support for partitioned variables?
reader = tf.contrib.framework.load_checkpoint(ckpt_dir)
variable_map = {}
for var in tf.contrib.framework.get_trainable_variables():
var_name = var.name.split(":")[0]
if reader.has_tensor(var_name):
tf.logging.info("Loading variable from checkpoint: %s", var_name)
variable_map[var_name] = var
else:
tf.logging.info(
"Cannot find variable in checkpoint, skipping: %s", var_name)
tf.train.init_from_checkpoint(ckpt_dir, variable_map)
def estimator_spec_train(self, loss, num_async_replicas=1, use_tpu=False):
"""Construct EstimatorSpec for TRAIN mode."""
train_op = self.optimize(loss, num_async_replicas=num_async_replicas,
use_tpu=use_tpu)
if self._hparams.warm_start_from:
self.initialize_from_ckpt(self._hparams.warm_start_from)
if use_tpu:
host_call = _create_host_call(self.hparams.model_dir)
_remove_summaries()
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=train_op,
host_call=host_call)
else:
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
def estimator_spec_eval(self, features, logits, labels, loss, losses_dict):
"""Construct EstimatorSpec for EVAL mode."""
del losses_dict
hparams = self.hparams
if not hasattr(hparams, "problem"):
raise NotImplementedError(_no_problem_err("estimator_spec_eval"))
problem = hparams.problem
if common_layers.is_xla_compiled():
_remove_summaries()
if isinstance(logits, dict):
eval_metrics_fn = _create_tpu_eval_metrics_fn(problem, hparams)
# For TPU, logits dict will be passed as keyword arguments to
# eval_metrics_fn. Here we add the labels to those arguments.
logits.update({"labels": labels})
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.EVAL,
eval_metrics=(eval_metrics_fn, logits),
loss=loss)
else:
eval_metrics_fn = _create_tpu_eval_metrics_fn(problem, hparams)
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.EVAL,
eval_metrics=(eval_metrics_fn, [logits, labels]),
loss=loss)
else:
task_list = [problem]
if hasattr(problem, "task_list"):
task_list = problem.task_list
eval_metrics_fns = metrics.create_evaluation_metrics(task_list, hparams)
eval_metrics = {}
for metric_name, metric_fn in six.iteritems(eval_metrics_fns):
if isinstance(logits, dict):
# the key is located in the center of metric_name: "metrics-%s/%s/%s"
k = metric_name.split("/")[1]
if k in logits:
eval_metrics[metric_name] = metric_fn(logits[k], features,
features[k])
else:
# We do not make it an error because we sometimes run models that
# predict only parts of the targets defined by the Problem class.
# For example, an autoencoder or pure-video model can run on a gym
# problem even if another model is also predicting other things,
# like actions or rewards.
tf.logging.warning("No key %s in logits for evaluation." % k)
else:
eval_metrics[metric_name] = metric_fn(logits, features,
features["targets"])
if isinstance(logits, dict):
predictions = logits
else:
predictions = {"predictions": logits}
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.EVAL,
predictions=predictions,
eval_metric_ops=eval_metrics,
loss=loss)
def estimator_spec_predict(self, features, use_tpu=False):
"""Construct EstimatorSpec for PREDICT mode."""
decode_hparams = self._decode_hparams
infer_out = self.infer(
features,
beam_size=decode_hparams.beam_size,
top_beams=(decode_hparams.beam_size
if decode_hparams.return_beams else 1),
alpha=decode_hparams.alpha,
decode_length=decode_hparams.extra_length,
use_tpu=use_tpu)
if isinstance(infer_out, dict):
outputs = infer_out["outputs"]
scores = infer_out["scores"]
else:
outputs = infer_out
scores = None
inputs = features.get("inputs")
if inputs is None:
inputs = features["targets"]
predictions = {
"outputs": outputs,
"scores": scores,
"inputs": inputs,
"targets": features.get("infer_targets"),
}
# Pass through remaining features
for name, feature in features.items():
if name not in list(predictions.keys()) + ["infer_targets"]:
if not feature.shape.as_list():
# All features must have a batch dimension
batch_size = common_layers.shape_list(outputs)[0]
feature = tf.tile(tf.expand_dims(feature, 0), [batch_size])
predictions[name] = feature
_del_dict_non_tensors(predictions)
export_out = {"outputs": predictions["outputs"]}
if "scores" in predictions:
export_out["scores"] = predictions["scores"]
# Necessary to rejoin examples in the correct order with the Cloud ML Engine
# batch prediction API.
if "batch_prediction_key" in predictions:
export_out["batch_prediction_key"] = predictions["batch_prediction_key"]
_remove_summaries()
export_outputs = {
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
tf.estimator.export.PredictOutput(export_out)
}
if use_tpu:
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs=export_outputs)
else:
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs=export_outputs)
def _normalize_body_output(self, body_out):
if isinstance(body_out, tuple):
output, losses = body_out
if not isinstance(losses, dict):
losses = {"extra": tf.reduce_mean(losses)}
else:
output = body_out
losses = {"extra": 0.0}
return output, losses
def _warn_changed_modality_type(new_name, old_name, feature_name):
new_type, new_name = registry.parse_modality_name(new_name)
old_type, old_name = registry.parse_modality_name(old_name)
if new_type != old_type:
log_warn(
"%s has a designated modality type %s (%s) but has been "
"overridden with a modality of type %s (%s).", feature_name, old_type,
old_name, new_type, new_name)
def _with_timing(fn, msg, silent=False):
def fn_with_timing(*args, **kwargs):
start_time = time.time()
res = fn(*args, **kwargs)
if not silent:
log_info("Doing %s took %.3f sec." % (msg, time.time() - start_time))
return res
return fn_with_timing
def _create_dummy_vars():
"""Dummy vars for restore to work when not using TPU codepath."""
var_names = set([v.name for v in tf.global_variables()])
if "losses_avg/problem_0/total_loss:0" in var_names:
return
with tf.variable_scope("losses_avg"):
with tf.variable_scope("problem_0"):
for var_name in ["total", "extra", "training"]:
tf.get_variable(
"%s_loss" % var_name, initializer=100.0, trainable=False)
with tf.variable_scope("train_stats"):
tf.get_variable("problem_0_steps", initializer=0, trainable=False)
# These metrics are implemented with py_funcs and therefore do not work with TPU
TPU_METRIC_BLACKLIST = set([
metrics.Metrics.APPROX_BLEU,
metrics.Metrics.ROUGE_2_F,
metrics.Metrics.ROUGE_L_F,
metrics.Metrics.IMAGE_SUMMARY,
])
def _create_tpu_eval_metrics_fn(problem, hparams):
"""Create the metrics_fn that TPUEstimatorSpec expects."""
metric_fns = []
eval_metrics = problem.eval_metrics()
tm = problem.get_hparams().target_modality
if isinstance(tm, dict):
for k, v in six.iteritems(tm):
if isinstance(v, tuple):
v = registry.create_modality(v, hparams)
weights_fn = v.targets_weights_fn
def make_metric_fn(metric_fn):
def wrapped_metric_fn(logits, labels, weights_fn=weights_fn):
num, den = metric_fn(logits, labels, weights_fn=weights_fn)
return tf.metrics.mean(num, den)
return wrapped_metric_fn
for metric in eval_metrics:
if metric in TPU_METRIC_BLACKLIST:
log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
continue
name = "%s/metrics-%s/%s" % (k, problem.name, metric)
metric_fns.append((name, make_metric_fn(metrics.METRICS_FNS[metric])))
else:
if isinstance(tm, tuple):
tm = registry.create_modality(tm, hparams)
weights_fn = tm.targets_weights_fn
def make_metric_fn(metric_fn):
def wrapped_metric_fn(logits, labels):
num, den = metric_fn(logits, labels, weights_fn=weights_fn)
return tf.metrics.mean(num, den)
return wrapped_metric_fn
for metric in eval_metrics:
if metric in TPU_METRIC_BLACKLIST:
log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
continue
name = "metrics-%s/%s" % (problem.name, metric)
metric_fns.append((name, make_metric_fn(metrics.METRICS_FNS[metric])))
def all_metrics_fn(logits=None, labels=None, **kwargs):
"""Construct metrics dictionary."""
metrics_dict = {}
if logits is None:
logits = kwargs
for name, fn in metric_fns:
if isinstance(logits, dict) and isinstance(labels, dict):
for k, v in six.iteritems(logits):
metrics_dict["%s/%s" % (k, name)] = fn(v, labels[k])
elif isinstance(logits, dict):
tf.logging.warning("Logits is a dict, but labels is not; only "
"evaluating logits['targets'] against labels.")
metrics_dict["%s/%s" % ("targets", name)] = fn(logits["targets"],
labels)
else:
metrics_dict[name] = fn(logits, labels)
return metrics_dict
return all_metrics_fn
def _remove_summaries():
g = tf.get_default_graph()
key = tf.GraphKeys.SUMMARIES
del g.get_collection_ref(key)[:]
assert not g.get_collection(key)
def _create_host_call(model_dir):
"""Construct a host_call writing scalar summaries.
Args:
    model_dir: String containing the path to the training directory, where summaries are written
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
"""
graph = tf.get_default_graph()
summaries = graph.get_collection(tf.GraphKeys.SUMMARIES)
gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
summary_kwargs = collections.OrderedDict()
for t in summaries:
# TODO(aidangomez): enable ImageSummary support when we have a faster method
# see @shibow's comment in cl/202344570
if t.op.type not in ["ScalarSummary"]:
tf.logging.warn("Ignoring unsupported tf.Summary type %s" % t.op.type)
continue
name = t.op.name
tensor = t.op.inputs[1]
if t.op.type == "ScalarSummary":
assert tensor.shape.is_compatible_with([])
if tensor.dtype == tf.int64:
tensor = tf.to_int32(tensor)
summary_kwargs["ScalarSummary" + name] = tf.reshape(tensor, [1])
elif t.op.type == "ImageSummary":
# TODO(aidangomez): as we move to support more types, update
# common_layers.tpu_safe_image_summary
if tensor.dtype != tf.float32:
tf.logging.warn(
"Currently T2T on TPU only supports ImageSummary of "
"tf.float32-type Tensors. Skipping Tensor "
"%s with dtype %s..." % (tensor.name, tensor.dtype))
continue
# tensor = tf.to_float(tensor)
summary_kwargs["ImageSummary" + name] = tensor
  # When no supported summaries are found, don't create host_call. Otherwise,
  # the TPU outfeed queue would enqueue global_step while host_call never
  # dequeues it, eventually causing a hang.
if not summary_kwargs:
return None
summary_kwargs["global_step"] = gs_t
def host_call_fn(**kwargs):
"""Training host call. Creates summaries for training metrics.
Args:
**kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must
contain key "global_step" with value of current global_step Tensor.
Returns:
List of summary ops to run on the CPU host.
"""
gs = tf.to_int64(kwargs.pop("global_step")[0])
with tf.contrib.summary.create_file_writer(model_dir).as_default():
with tf.contrib.summary.always_record_summaries():
# We need to use tf.contrib.summary in order to feed the `step`.
for name, value in sorted(six.iteritems(kwargs)):
if name.startswith("ScalarSummary"):
name = name[len("ScalarSummary"):]
tf.contrib.summary.scalar(
name, tf.reduce_mean(tf.to_float(value)), step=gs)
elif name.startswith("ImageSummary"):
name = name[len("ImageSummary"):]
tf.contrib.summary.image(name, value, step=gs)
return tf.contrib.summary.all_summary_ops()
return (host_call_fn, summary_kwargs)
def _del_dict_non_tensors(d):
for k in list(d.keys()):
if not isinstance(d[k], tf.Tensor):
del d[k]
class DummyVariableStore(object):
@contextlib.contextmanager
def as_default(self):
yield
def create_eager_var_store():
if tf.contrib.eager.in_eager_mode():
return variable_scope.EagerVariableStore()
else:
return DummyVariableStore()
def scheduled_sampling(hparams, problem_hparams, dp, sharded_logits, losses,
sharded_features, transformed_features, model):
"""Scheduled sampling."""
target_modality = problem_hparams.target_modality
def sample(x):
"""Multinomial sampling from a n-dimensional tensor."""
vocab_size = target_modality.top_dimensionality
samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]), 1)
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
return tf.to_int32(reshaped_samples)
def mix_gold_sampled(gold_targets, sampled_targets):
return tf.where(
tf.less(
tf.random_uniform(common_layers.shape_list(sampled_targets)),
hparams.scheduled_sampling_gold_mixin_prob), gold_targets,
sampled_targets)
def sampled_results():
"""Generate scheduled sampling results."""
sampled_targets = dp(sample, sharded_logits)
new_targets = dp(mix_gold_sampled, sharded_features["targets"],
sampled_targets)
new_features = transformed_features
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
with tf.variable_scope(target_modality.name):
new_features["targets"] = target_modality.targets_bottom_sharded(
new_targets, dp)
with tf.variable_scope("body"):
body_outputs, losses = model.model_fn_sharded(new_features)
if not isinstance(losses, dict): # If it's a single extra loss.
losses = {"extra": losses}
with tf.variable_scope(target_modality.name):
new_sharded_logits = target_modality.top_sharded(
body_outputs, sharded_features["targets"], dp)
if "training" not in losses:
training_loss = target_modality.loss_sharded(
sharded_logits, sharded_features["targets"], dp)
training_loss *= problem_hparams.loss_multiplier
losses["training"] = training_loss
return new_sharded_logits, losses
# Run the above conditionally.
prob = hparams.scheduled_sampling_prob
prob *= common_layers.inverse_exp_decay(
hparams.scheduled_sampling_warmup_steps, min_value=0.001)
sharded_logits, losses = tf.cond(
tf.less(tf.random_uniform([]), prob), sampled_results,
lambda: (sharded_logits, losses))
return sharded_logits, losses
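# Illustrative numpy sketch (not from the upstream tensor2tensor source): the
# heart of scheduled_sampling above is mix_gold_sampled -- each position keeps
# the gold token with probability scheduled_sampling_gold_mixin_prob and
# otherwise takes the model's own sample. Toy values below are made up.
import numpy as np


def _mix_gold_sampled_demo(gold, sampled, gold_mixin_prob):
  # Keep the gold token wherever the uniform draw falls below the mixin prob.
  keep_gold = np.random.random_sample(sampled.shape) < gold_mixin_prob
  return np.where(keep_gold, gold, sampled)


# With gold_mixin_prob=1.0 every position keeps the gold token.
assert (_mix_gold_sampled_demo(np.array([1, 2, 3]), np.array([9, 9, 9]), 1.0)
        == np.array([1, 2, 3])).all()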
def average_sharded_losses(sharded_losses):
"""Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
"""
losses = {}
for loss_name in sorted(sharded_losses[0]):
all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
if isinstance(all_shards[0], tuple):
sharded_num, sharded_den = zip(*all_shards)
mean_loss = (
tf.add_n(sharded_num) / tf.maximum(
tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
else:
mean_loss = tf.reduce_mean(all_shards)
losses[loss_name] = mean_loss
return losses
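# Illustrative plain-Python sketch (not from the upstream source): when a shard
# reports its loss as a (numerator, denominator) pair, the function above
# computes sum(num) / sum(den) across shards -- a weighted mean -- rather than
# averaging the per-shard ratios. Toy numbers:
_demo_shard_losses = [{"training": (6.0, 3.0)}, {"training": (4.0, 1.0)}]
_demo_nums, _demo_dens = zip(*[s["training"] for s in _demo_shard_losses])
# Weighted mean is 10 / 4 = 2.5, not the unweighted mean of 2.0 and 4.0 (3.0).
assert sum(_demo_nums) / max(1.0, sum(_demo_dens)) == 2.5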
def summarize_features(features, num_shards=1):
"""Generate summaries for features."""
if not common_layers.should_generate_summaries():
return
with tf.name_scope("input_stats"):
for (k, v) in sorted(six.iteritems(features)):
if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1:
tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
nonpadding = tf.to_float(tf.not_equal(v, 0))
nonpadding_tokens = tf.reduce_sum(nonpadding)
tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
tf.summary.scalar("%s_nonpadding_fraction" % k,
tf.reduce_mean(nonpadding))
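# Illustrative sketch (not from the upstream source): the "nonpadding_fraction"
# summary above is just the mean of a 0/1 mask marking ids different from 0,
# where id 0 is assumed to be the padding id. Toy ids:
import numpy as np

_demo_ids = np.array([[5, 7, 2, 0]])
assert (_demo_ids != 0).mean() == 0.75  # three of the four positions are real tokens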
_already_logged = set()
def _eager_log(level, *args):
if tf.contrib.eager.in_eager_mode() and args in _already_logged:
return
_already_logged.add(args)
getattr(tf.logging, level)(*args)
def log_info(*args):
_eager_log("info", *args)
def log_warn(*args):
_eager_log("warn", *args)
def _compose_custom_getters(getter_a, getter_b):
"""Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter
"""
if not getter_a:
return getter_b
if not getter_b:
return getter_a
def getter_fn(getter, *args, **kwargs):
return getter_b(functools.partial(getter_a, getter), *args, **kwargs)
return getter_fn
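# Illustrative toy demo (not from the upstream source) of the composition
# order, with plain callables standing in for variable getters: getter_b wraps
# getter_a, which wraps the base getter, so both see every variable request.
def _demo_base_getter(name):
  return "var:" + name


def _demo_getter_a(getter, name):
  return getter(name) + "+a"


def _demo_getter_b(getter, name):
  return getter(name) + "+b"


_demo_composed = _compose_custom_getters(_demo_getter_a, _demo_getter_b)
assert _demo_composed(_demo_base_getter, "w") == "var:w+a+b"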
def set_custom_getter_compose(custom_getter):
"""Set a custom getter in the current variable scope.
Do not overwrite the existing custom getter - rather compose with it.
Args:
custom_getter: a custom getter.
"""
tf.get_variable_scope().set_custom_getter(
_compose_custom_getters(tf.get_variable_scope().custom_getter,
custom_getter))
| 38.473002
| 84
| 0.662648
|
948a57feb5e8d5dd6bff822d8b929adbc9214049
| 461
|
py
|
Python
|
CreateMasks.py
|
genigarus/CreateImageOverlaysandDepthMask
|
90aa3362970c6e27459995dfb71134691fc10220
|
[
"Apache-2.0"
] | null | null | null |
CreateMasks.py
|
genigarus/CreateImageOverlaysandDepthMask
|
90aa3362970c6e27459995dfb71134691fc10220
|
[
"Apache-2.0"
] | null | null | null |
CreateMasks.py
|
genigarus/CreateImageOverlaysandDepthMask
|
90aa3362970c6e27459995dfb71134691fc10220
|
[
"Apache-2.0"
] | null | null | null |
import glob
from PIL import Image
out = r"D:\Development\TSAI\EVA\MaskRCNN Dataset\Foreground\masks\{}"
path = r"D:\Development\TSAI\EVA\MaskRCNN Dataset\Foreground\resize\*.*"
for file in glob.glob(path):
im = Image.open(file, 'r')
file_name = file.split("\\")[-1]
rgb_data = im.tobytes("raw", "RGB")
alpha_data = im.tobytes("raw", "A")
alpha_image = Image.frombytes("L", im.size, alpha_data)
alpha_image.save(out.format(file_name))
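# Illustrative alternative (not part of the original script): PIL can hand back
# the alpha band directly via Image.split(), which avoids the raw
# tobytes/frombytes round-trip used in the loop above. Paths are placeholders.
def extract_alpha_mask(image_path, mask_path):
    with Image.open(image_path) as im:
        # For RGBA input the last band returned by split() is the alpha channel.
        im.split()[-1].save(mask_path)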
| 30.733333
| 72
| 0.678959
|
abee95fdaf26f13b273ab160923ce0be64258f09
| 1,406
|
py
|
Python
|
Solver/FDM/VectorGenerater.py
|
atily17/research
|
0e762e03747995c8a7d1d8a2ec42be31a17209dc
|
[
"BSD-3-Clause"
] | null | null | null |
Solver/FDM/VectorGenerater.py
|
atily17/research
|
0e762e03747995c8a7d1d8a2ec42be31a17209dc
|
[
"BSD-3-Clause"
] | null | null | null |
Solver/FDM/VectorGenerater.py
|
atily17/research
|
0e762e03747995c8a7d1d8a2ec42be31a17209dc
|
[
"BSD-3-Clause"
] | 1
|
2022-02-25T06:38:29.000Z
|
2022-02-25T06:38:29.000Z
|
import numpy as np
class VectorGenerater(object):
def generate(self, problem, grid):
nodes = grid.node.nodes
vector = np.zeros(len(nodes))
for i in range(len(nodes)):
if nodes[i]["position"][0] == "i":
vector[i] = problem.source.source(nodes[i]["point"])
elif "b" in nodes[i]["position"][0]:
vector[i] = problem.domain.bc["bc"][int(nodes[i]["position"][1:])]["constant"]
elif "c" in nodes[i]["position"][0]:
if problem.domain.bc["priority"] is None:
                    # a corner node takes the average of its two adjacent boundary conditions
                    k1 = int(nodes[i]["position"][1:])
                    k2 = (k1 - 1) % problem.domain.nVertexes
                    vector[i] = (problem.domain.bc["bc"][k1]["constant"]
                                 + problem.domain.bc["bc"][k2]["constant"]) / 2
elif type(problem.domain.bc["priority"]) == list:
k1 = int(nodes[i]["position"][1:])
k2 = (int(nodes[i]["position"][1:]) - 1) % problem.domain.nVertexes
index1 = problem.domain.bc["priority"].index(k1)
index2 = problem.domain.bc["priority"].index(k2)
if index1 < index2:
vector[i]=problem.domain.bc["bc"][k1]["constant"]
if index1 > index2:
vector[i]=problem.domain.bc["bc"][k2]["constant"]
return vector
| 54.076923
| 96
| 0.495021
|
4decbf4240689040244d9296583939d60a9dcd59
| 2,499
|
py
|
Python
|
azure-mgmt-web/azure/mgmt/web/models/operation.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-web/azure/mgmt/web/models/operation.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-web/azure/mgmt/web/models/operation.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Operation(Model):
"""An operation on a resource.
:param id: Operation ID.
:type id: str
:param name: Operation name.
:type name: str
:param status: The current status of the operation. Possible values
include: 'InProgress', 'Failed', 'Succeeded', 'TimedOut', 'Created'
:type status: str or ~azure.mgmt.web.models.OperationStatus
:param errors: Any errors associate with the operation.
:type errors: list[~azure.mgmt.web.models.ErrorEntity]
:param created_time: Time when operation has started.
:type created_time: datetime
:param modified_time: Time when operation has been updated.
:type modified_time: datetime
:param expiration_time: Time when operation will expire.
:type expiration_time: datetime
:param geo_master_operation_id: Applicable only for stamp operation ids.
:type geo_master_operation_id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'OperationStatus'},
'errors': {'key': 'errors', 'type': '[ErrorEntity]'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'expirationTime', 'type': 'iso-8601'},
'geo_master_operation_id': {'key': 'geoMasterOperationId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Operation, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
self.status = kwargs.get('status', None)
self.errors = kwargs.get('errors', None)
self.created_time = kwargs.get('created_time', None)
self.modified_time = kwargs.get('modified_time', None)
self.expiration_time = kwargs.get('expiration_time', None)
self.geo_master_operation_id = kwargs.get('geo_master_operation_id', None)
| 43.086207
| 82
| 0.62425
|
f59c3e593d31f3f4d336a2c5a8e7a5bd01c60561
| 3,941
|
py
|
Python
|
appengine/integration_tests/deploy_check.py
|
JonathanRRogers/runtimes-common
|
6b69050bed4389763ddff8e1b9ec48f12ac32fc5
|
[
"Apache-2.0"
] | 95
|
2016-09-09T23:36:36.000Z
|
2022-03-05T20:06:00.000Z
|
appengine/integration_tests/deploy_check.py
|
JonathanRRogers/runtimes-common
|
6b69050bed4389763ddff8e1b9ec48f12ac32fc5
|
[
"Apache-2.0"
] | 392
|
2016-09-13T15:15:57.000Z
|
2022-02-22T01:18:23.000Z
|
appengine/integration_tests/deploy_check.py
|
JonathanRRogers/runtimes-common
|
6b69050bed4389763ddff8e1b9ec48f12ac32fc5
|
[
"Apache-2.0"
] | 73
|
2016-09-08T19:27:03.000Z
|
2021-07-08T13:28:18.000Z
|
#!/usr/bin/python
# Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
from retrying import retry
import subprocess
import sys
from testsuite import deploy_app
from testsuite import test_util
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--directory', '-d', type=str,
help='Directory of app to be run',
required=True)
parser.add_argument('--language', '-l', type=str,
help='Language of the app deployed',
required=False)
parser.add_argument('--verbose', '-v', help='Debug logging',
action='store_true', required=False)
parser.add_argument('--skip-builders', action='store_true',
help='Skip runtime builder flow', default=False)
parser.add_argument('--skip-xrt', action='store_true',
help='Skip XRT flow', default=False)
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
# retrieve previous config value to reset after
cmd = ['gcloud', 'config', 'list', '--format=json']
output = json.loads(subprocess.check_output(cmd))
prev_builder_value = None
if 'app' in output:
prev_builder_value = output.get('app').get('use_runtime_builders')
if args.skip_xrt:
logging.info('Skipping xrt flow.')
else:
# disable app/use_runtime_builders to hit the XRT flow
_set_runtime_builder_flag(False)
_deploy_and_test(args.directory, args.language, True)
if args.skip_builders:
logging.info('Skipping builder flow.')
else:
# set app/use_runtime_builders to explicitly enter builder flow
_set_runtime_builder_flag(True)
_deploy_and_test(args.directory, args.language, False)
_set_runtime_builder_flag(prev_builder_value)
def _set_runtime_builder_flag(flag):
try:
if flag is None:
cmd = ['gcloud', 'config', 'unset',
'app/use_runtime_builders']
else:
cmd = ['gcloud', 'config', 'set',
'app/use_runtime_builders', str(flag)]
subprocess.check_output(cmd)
except subprocess.CalledProcessError as cpe:
logging.error(cpe.output)
sys.exit(1)
def _deploy_and_test(appdir, language, is_xrt):
version = None
try:
logging.debug('Testing runtime image.')
version, url = deploy_app.deploy_app_and_record_latency(appdir,
language,
is_xrt)
application_url = test_util.retrieve_url_for_version(version)
_test_application(application_url)
except Exception as e:
logging.error('Error when contacting application: %s', e)
sys.exit(1)
finally:
if version:
deploy_app.stop_version(version)
@retry(wait_fixed=4000, stop_max_attempt_number=8)
def _test_application(application_url):
output, status_code = test_util.get(application_url)
if status_code:
logging.error(output)
        raise RuntimeError('Application returned non-zero status code: %d' %
                           status_code)
else:
return output
if __name__ == '__main__':
sys.exit(main())
| 33.974138
| 75
| 0.639685
|
07c18fd20821623e5214aa901eaf2ca4ccdd8c1e
| 1,504
|
py
|
Python
|
nginx/tests/test_vts.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | 663
|
2016-08-23T05:23:45.000Z
|
2022-03-29T00:37:23.000Z
|
nginx/tests/test_vts.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | 6,642
|
2016-06-09T16:29:20.000Z
|
2022-03-31T22:24:09.000Z
|
nginx/tests/test_vts.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | 1,222
|
2017-01-27T15:51:38.000Z
|
2022-03-31T18:17:51.000Z
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.nginx import VTS_METRIC_MAP
from .common import TAGS, USING_VTS
pytestmark = pytest.mark.skipif(not USING_VTS, reason='Not using VTS')
@pytest.mark.usefixtures('dd_environment')
def test_vts(check, instance_vts, aggregator):
check = check(instance_vts)
check.check(instance_vts)
# skip metrics that are difficult to reproduce in a test environment
skip_metrics = [
'nginx.upstream.peers.responses.1xx',
'nginx.upstream.peers.responses.2xx',
'nginx.upstream.peers.responses.3xx',
'nginx.upstream.peers.responses.4xx',
'nginx.upstream.peers.responses.5xx',
'nginx.upstream.peers.requests',
'nginx.upstream.peers.received',
'nginx.server_zone.received',
'nginx.server_zone.responses.1xx',
'nginx.server_zone.responses.2xx',
'nginx.server_zone.responses.3xx',
'nginx.server_zone.responses.4xx',
'nginx.server_zone.responses.5xx',
'nginx.server_zone.requests',
'nginx.server_zone.sent',
'nginx.upstream.peers.sent',
'nginx.upstream.peers.health_checks.last_passed',
'nginx.upstream.peers.weight',
'nginx.upstream.peers.backup',
]
for mapped in VTS_METRIC_MAP.values():
if mapped in skip_metrics:
continue
aggregator.assert_metric(mapped, tags=TAGS)
| 33.422222
| 72
| 0.684176
|
6af110291a019161aa7987ab19e03fdb618b7e3f
| 8,161
|
py
|
Python
|
package_control/deps/oscrypto/_win/trust_list.py
|
tower000/sublime_package_control
|
db53090bd0920ca2c58ef27f0361a4d7b096df0e
|
[
"MIT",
"Unlicense"
] | 3
|
2019-06-06T00:13:44.000Z
|
2020-08-16T20:11:13.000Z
|
package_control/deps/oscrypto/_win/trust_list.py
|
Allyn69/package_control
|
f78578ed67529e263fb1f4e4f90f92295830560f
|
[
"MIT",
"Unlicense"
] | null | null | null |
package_control/deps/oscrypto/_win/trust_list.py
|
Allyn69/package_control
|
f78578ed67529e263fb1f4e4f90f92295830560f
|
[
"MIT",
"Unlicense"
] | 1
|
2021-07-26T00:35:53.000Z
|
2021-07-26T00:35:53.000Z
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import datetime
import hashlib
import struct
from ...asn1crypto.x509 import Certificate
from .._ffi import (
array_from_pointer,
buffer_from_bytes,
bytes_from_buffer,
cast,
deref,
is_null,
new,
null,
struct_from_buffer,
unwrap,
)
from ._crypt32 import crypt32, Crypt32Const, get_error, handle_error
from .._types import str_cls
__all__ = [
'extract_from_system',
'system_path',
]
def system_path():
return None
def extract_from_system(cert_callback=None, callback_only_on_failure=False):
"""
Extracts trusted CA certificates from the Windows certificate store
:param cert_callback:
A callback that is called once for each certificate in the trust store.
        It should accept two parameters: an asn1crypto.x509.Certificate object,
        and a reason. The reason will be None if the certificate is being
        exported, otherwise a unicode string explaining why it is not exported.
:param callback_only_on_failure:
A boolean - if the callback should only be called when a certificate is
not exported.
:raises:
OSError - when an error is returned by the OS crypto library
:return:
A list of 3-element tuples:
- 0: a byte string of a DER-encoded certificate
- 1: a set of unicode strings that are OIDs of purposes to trust the
certificate for
- 2: a set of unicode strings that are OIDs of purposes to reject the
certificate for
"""
certificates = {}
processed = {}
now = datetime.datetime.utcnow()
for store in ["ROOT", "CA"]:
store_handle = crypt32.CertOpenSystemStoreW(null(), store)
handle_error(store_handle)
context_pointer = null()
while True:
context_pointer = crypt32.CertEnumCertificatesInStore(store_handle, context_pointer)
if is_null(context_pointer):
break
context = unwrap(context_pointer)
trust_all = False
data = None
digest = None
if context.dwCertEncodingType != Crypt32Const.X509_ASN_ENCODING:
continue
data = bytes_from_buffer(context.pbCertEncoded, int(context.cbCertEncoded))
digest = hashlib.sha1(data).digest()
if digest in processed:
continue
processed[digest] = True
cert_info = unwrap(context.pCertInfo)
not_before_seconds = _convert_filetime_to_timestamp(cert_info.NotBefore)
try:
not_before = datetime.datetime.fromtimestamp(not_before_seconds)
if not_before > now:
if cert_callback:
cert_callback(Certificate.load(data), 'not yet valid')
continue
except (ValueError, OSError) as e:
# If there is an error converting the not before timestamp,
# it is almost certainly because it is from too long ago,
# which means the cert is definitely valid by now.
pass
not_after_seconds = _convert_filetime_to_timestamp(cert_info.NotAfter)
try:
not_after = datetime.datetime.fromtimestamp(not_after_seconds)
if not_after < now:
if cert_callback:
cert_callback(Certificate.load(data), 'no longer valid')
continue
except (ValueError, OSError) as e:
# The only reason we would get an exception here is if the
# expiration time is so far in the future that it can't be
# used as a timestamp, or it is before 0. If it is very far
# in the future, the cert is still valid, so we only raise
# an exception if the timestamp is less than zero.
if not_after_seconds < 0:
message = e.args[0] + ' - ' + str_cls(not_after_seconds)
e.args = (message,) + e.args[1:]
raise e
trust_oids = set()
reject_oids = set()
# Here we grab the extended key usage properties that Windows
# layers on top of the extended key usage extension that is
# part of the certificate itself. For highest security, users
# should only use certificates for the intersection of the two
            # lists of purposes. However, many seem to treat the OS trust
# list as an override.
to_read = new(crypt32, 'DWORD *', 0)
res = crypt32.CertGetEnhancedKeyUsage(
context_pointer,
Crypt32Const.CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG,
null(),
to_read
)
# Per the Microsoft documentation, if CRYPT_E_NOT_FOUND is returned
# from get_error(), it means the certificate is valid for all purposes
error_code, _ = get_error()
if not res and error_code != Crypt32Const.CRYPT_E_NOT_FOUND:
handle_error(res)
if error_code == Crypt32Const.CRYPT_E_NOT_FOUND:
trust_all = True
else:
usage_buffer = buffer_from_bytes(deref(to_read))
res = crypt32.CertGetEnhancedKeyUsage(
context_pointer,
Crypt32Const.CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG,
cast(crypt32, 'CERT_ENHKEY_USAGE *', usage_buffer),
to_read
)
handle_error(res)
key_usage_pointer = struct_from_buffer(crypt32, 'CERT_ENHKEY_USAGE', usage_buffer)
key_usage = unwrap(key_usage_pointer)
# Having no enhanced usage properties means a cert is distrusted
if key_usage.cUsageIdentifier == 0:
if cert_callback:
cert_callback(Certificate.load(data), 'explicitly distrusted')
continue
oids = array_from_pointer(
crypt32,
'LPCSTR',
key_usage.rgpszUsageIdentifier,
key_usage.cUsageIdentifier
)
for oid in oids:
trust_oids.add(oid.decode('ascii'))
cert = None
# If the certificate is not under blanket trust, we have to
# determine what purposes it is rejected for by diffing the
# set of OIDs from the certificate with the OIDs that are
# trusted.
if not trust_all:
cert = Certificate.load(data)
if cert.extended_key_usage_value:
for cert_oid in cert.extended_key_usage_value:
oid = cert_oid.dotted
if oid not in trust_oids:
reject_oids.add(oid)
if cert_callback and not callback_only_on_failure:
if cert is None:
cert = Certificate.load(data)
cert_callback(cert, None)
certificates[digest] = (data, trust_oids, reject_oids)
result = crypt32.CertCloseStore(store_handle, 0)
handle_error(result)
store_handle = None
return certificates.values()
def _convert_filetime_to_timestamp(filetime):
"""
    Windows returns times as 64-bit unsigned longs that are the number
    of 100-nanosecond intervals since Jan 1 1601. This converts such a
    value to an integer Unix timestamp.
:param filetime:
A FILETIME struct object
:return:
An integer unix timestamp
"""
hundreds_nano_seconds = struct.unpack(
b'>Q',
struct.pack(
b'>LL',
filetime.dwHighDateTime,
filetime.dwLowDateTime
)
)[0]
seconds_since_1601 = hundreds_nano_seconds / 10000000
return seconds_since_1601 - 11644473600 # Seconds from Jan 1 1601 to Jan 1 1970
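# Illustrative worked example (not part of the original oscrypto source): the
# Unix epoch expressed as a FILETIME is 116444736000000000 hundred-nanosecond
# intervals, i.e. dwHighDateTime=0x019DB1DE and dwLowDateTime=0xD53E8000, so
# the helper above maps it to timestamp 0.
import collections

_DemoFiletime = collections.namedtuple('_DemoFiletime', 'dwHighDateTime dwLowDateTime')
assert _convert_filetime_to_timestamp(_DemoFiletime(0x019DB1DE, 0xD53E8000)) == 0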
| 35.79386
| 98
| 0.591349
|
52aa4a9fcec34db7fdcc3bca4a48e2d59cdc3e72
| 3,130
|
py
|
Python
|
mjrl/utils/logger.py
|
xtwentian3/mjrl
|
e403c67c165e37d933a4bee8f80771d1046b51f3
|
[
"Apache-2.0"
] | null | null | null |
mjrl/utils/logger.py
|
xtwentian3/mjrl
|
e403c67c165e37d933a4bee8f80771d1046b51f3
|
[
"Apache-2.0"
] | null | null | null |
mjrl/utils/logger.py
|
xtwentian3/mjrl
|
e403c67c165e37d933a4bee8f80771d1046b51f3
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import scipy
import pickle
import os
import csv
class DataLog:
def __init__(self):
self.log = {}
self.max_len = 0
def log_kv(self, key, value):
# logs the (key, value) pair
        # TODO: This implementation is error-prone:
        # the columns will NOT stay aligned if some keys are missing during an iteration.
if key not in self.log:
self.log[key] = []
self.log[key].append(value)
if len(self.log[key]) > self.max_len:
            self.max_len = len(self.log[key])
def save_log(self, save_path):
# TODO: Validate all lengths are the same.
pickle.dump(self.log, open(save_path + '/log.pickle', 'wb'))
with open(save_path + '/log.csv', 'w') as csv_file:
fieldnames = list(self.log.keys())
if 'iteration' not in fieldnames:
fieldnames = ['iteration'] + fieldnames
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for row in range(self.max_len):
row_dict = {'iteration': row}
for key in self.log.keys():
if row < len(self.log[key]):
row_dict[key] = self.log[key][row]
writer.writerow(row_dict)
def get_current_log(self):
row_dict = {}
for key in self.log.keys():
# TODO: this is very error-prone (alignment is not guaranteed)
row_dict[key] = self.log[key][-1]
return row_dict
def get_current_log_print(self):
row_dict = {}
for key in self.log.keys():
# TODO: this is very error-prone (alignment is not guaranteed)
if len(self.log[key]) > 1 : row_dict[key] = self.log[key][-1]
return row_dict
def shrink_to(self, num_entries):
for key in self.log.keys():
self.log[key] = self.log[key][:num_entries]
self.max_len = num_entries
assert min([len(series) for series in self.log.values()]) == \
max([len(series) for series in self.log.values()])
def read_log(self, log_path):
assert log_path.endswith('log.csv')
with open(log_path) as csv_file:
reader = csv.DictReader(csv_file)
listr = list(reader)
keys = reader.fieldnames
data = {}
for key in keys:
data[key] = []
for row, row_dict in enumerate(listr):
for key in keys:
try:
data[key].append(eval(row_dict[key]))
except:
print("ERROR on reading key {}: {}".format(key, row_dict[key]))
if 'iteration' in data and data['iteration'][-1] != row:
raise RuntimeError("Iteration %d mismatch -- possibly corrupted logfile?" % row)
self.log = data
self.max_len = max(len(v) for k, v in self.log.items())
print("Log read from {}: had {} entries".format(log_path, self.max_len))
| 35.168539
| 100
| 0.554633
|
156b4c21bd77fad235ddb2847e6906beb5ea5256
| 5,581
|
py
|
Python
|
superset/importexport/api.py
|
Human-Security-Insights/superset
|
a8e7624eb5635ed9b84f1454d35029e1c18ebdee
|
[
"Apache-2.0"
] | 1
|
2022-02-10T11:30:05.000Z
|
2022-02-10T11:30:05.000Z
|
superset/importexport/api.py
|
Human-Security-Insights/superset
|
a8e7624eb5635ed9b84f1454d35029e1c18ebdee
|
[
"Apache-2.0"
] | 10
|
2022-01-05T01:31:07.000Z
|
2022-03-16T01:09:46.000Z
|
superset/importexport/api.py
|
Human-Security-Insights/superset
|
a8e7624eb5635ed9b84f1454d35029e1c18ebdee
|
[
"Apache-2.0"
] | 1
|
2022-03-09T02:57:17.000Z
|
2022-03-09T02:57:17.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from datetime import datetime
from io import BytesIO
from zipfile import is_zipfile, ZipFile
from flask import request, Response, send_file
from flask_appbuilder.api import BaseApi, expose, protect
from superset.commands.export.assets import ExportAssetsCommand
from superset.commands.importers.exceptions import (
IncorrectFormatError,
NoValidFilesFoundError,
)
from superset.commands.importers.v1.assets import ImportAssetsCommand
from superset.commands.importers.v1.utils import get_contents_from_bundle
from superset.extensions import event_logger
from superset.views.base_api import requires_form_data
class ImportExportRestApi(BaseApi):
"""
API for exporting all assets or importing them.
"""
resource_name = "assets"
openapi_spec_tag = "Import/export"
@expose("/export/", methods=["GET"])
@protect()
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.export",
log_to_statsd=False,
)
def export(self) -> Response:
"""
Export all assets.
---
get:
description: >-
Returns a ZIP file with all the Superset assets (databases, datasets, charts,
dashboards, saved queries) as YAML files.
responses:
200:
description: ZIP file
content:
application/zip:
schema:
type: string
format: binary
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
root = f"assets_export_{timestamp}"
filename = f"{root}.zip"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
for file_name, file_content in ExportAssetsCommand().run():
with bundle.open(f"{root}/{file_name}", "w") as fp:
fp.write(file_content.encode())
buf.seek(0)
response = send_file(
buf,
mimetype="application/zip",
as_attachment=True,
attachment_filename=filename,
)
return response
@expose("/import/", methods=["POST"])
@protect()
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.import_",
log_to_statsd=False,
)
@requires_form_data
def import_(self) -> Response:
"""Import multiple assets
---
post:
requestBody:
required: true
content:
multipart/form-data:
schema:
type: object
properties:
bundle:
description: upload file (ZIP or JSON)
type: string
format: binary
passwords:
description: >-
JSON map of passwords for each featured database in the
ZIP file. If the ZIP includes a database config in the path
`databases/MyDatabase.yaml`, the password should be provided
in the following format:
`{"databases/MyDatabase.yaml": "my_password"}`.
type: string
responses:
200:
description: Dashboard import result
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
upload = request.files.get("bundle")
if not upload:
return self.response_400()
if not is_zipfile(upload):
raise IncorrectFormatError("Not a ZIP file")
with ZipFile(upload) as bundle:
contents = get_contents_from_bundle(bundle)
if not contents:
raise NoValidFilesFoundError()
passwords = (
json.loads(request.form["passwords"])
if "passwords" in request.form
else None
)
command = ImportAssetsCommand(contents, passwords=passwords)
command.run()
return self.response(200, message="OK")
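# Illustrative sketch (not part of the Superset source): the export endpoint
# above streams an in-memory ZIP built with BytesIO and ZipFile.open(..., "w").
# The same pattern in isolation, with made-up file names and contents, looks
# like this (BytesIO and ZipFile are already imported at the top of this file).
def _bundle_files_demo(files):
    """files: dict of {relative_path: str contents} -> BytesIO holding a ZIP."""
    buf = BytesIO()
    with ZipFile(buf, "w") as bundle:
        for name, content in files.items():
            with bundle.open(name, "w") as fp:
                fp.write(content.encode())
    buf.seek(0)
    return buf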
| 34.030488
| 89
| 0.576778
|
073523ae945b3f906234434ebcd14e86b8e4a8ef
| 2,450
|
py
|
Python
|
case/__final__/02_concrete1.py
|
BlockResearchGroup/WS_structural-design
|
9a1222e728f5f2ea32d40624b61440fe97f1f9b8
|
[
"MIT"
] | 1
|
2021-01-12T15:36:53.000Z
|
2021-01-12T15:36:53.000Z
|
case/__final__/02_concrete1.py
|
compas-Workshops/WS_structural-design
|
9a1222e728f5f2ea32d40624b61440fe97f1f9b8
|
[
"MIT"
] | null | null | null |
case/__final__/02_concrete1.py
|
compas-Workshops/WS_structural-design
|
9a1222e728f5f2ea32d40624b61440fe97f1f9b8
|
[
"MIT"
] | 2
|
2019-05-10T16:05:26.000Z
|
2019-06-11T16:24:48.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.utilities import pairwise
from compas.datastructures import mesh_flip_cycles
from compas.datastructures import Mesh
from compas_rhino.artists import MeshArtist
mesh = Mesh.from_json('cablenet.json')
# make lookup dicts for vertex normals of the cablenet
key_normal = {key: mesh.vertex_normal(key) for key in mesh.vertices()}
# ==============================================================================
# Intrados and extrados
# ==============================================================================
idos = mesh.copy()
edos = idos.copy()
# offset the intrados from the cablenet
# by 2cm in the direction of the corresponding cablenet vertex normal
for key, attr in idos.vertices(True):
nx, ny, nz = key_normal[key]
attr['x'] += 0.02 * nx
attr['y'] += 0.02 * ny
attr['z'] += 0.02 * nz
# offset the extrados from the cablenet
# by 6cm in the direction of the corresponding cablenet vertex normal
for key, attr in edos.vertices(True):
nx, ny, nz = key_normal[key]
attr['x'] += 0.06 * nx
attr['y'] += 0.06 * ny
attr['z'] += 0.06 * nz
# ==============================================================================
# Concrete volume
# ==============================================================================
volume = idos.copy()
volume.name = 'concrete1'
# flip its cycles to make the bottom normals point downwards
mesh_flip_cycles(volume)
# set the key offset
dkey = volume._max_int_key + 1
# add the vertices of the extrados
for key, attr in edos.vertices(True):
volume.add_vertex(key=key + dkey, **attr)
# add the faces of the extrados
for fkey in edos.faces():
vertices = edos.face_vertices(fkey)
vertices = [key + dkey for key in vertices]
volume.add_face(vertices)
# construct a polygon of boundary vertices
boundary = edos.vertices_on_boundary(ordered=True)
boundary.append(boundary[0])
# add the "side" faces
for a, b in pairwise(boundary):
volume.add_face([b, a, a + dkey, b + dkey])
# ==============================================================================
# Export and visualisation
# ==============================================================================
# export
volume.to_json('concrete1.json')
# visualize
artist = MeshArtist(volume, layer="Concrete1")
artist.clear_layer()
artist.draw_mesh(color=(0, 0, 255))
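# --- Illustrative note (not part of the original script) ---
# pairwise() yields consecutive pairs, so appending the first boundary vertex
# again closes the loop and every (a, b) pair above becomes one quad side face
# joining intrados vertices to their extrados copies. The keys and dkey below
# are made up purely for illustration.
def _side_faces_sketch():
    from compas.utilities import pairwise
    boundary = [10, 11, 12]       # imaginary boundary vertex keys
    boundary.append(boundary[0])  # close the loop: [10, 11, 12, 10]
    dkey = 100                    # imaginary key offset for the extrados copies
    faces = [[b, a, a + dkey, b + dkey] for a, b in pairwise(boundary)]
    # faces == [[11, 10, 110, 111], [12, 11, 111, 112], [10, 12, 112, 110]]
    return faces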
| 28.823529
| 80
| 0.58
|
1aee9bc4a882e5281a9be9f475238a390c5e8785
| 174
|
py
|
Python
|
ewah_bool_utils/__init__.py
|
neutrinoceros/ewah_bool_utils
|
54e7a3c00c538dfd9d3d02df2f4a271907cd212c
|
[
"BSD-3-Clause"
] | null | null | null |
ewah_bool_utils/__init__.py
|
neutrinoceros/ewah_bool_utils
|
54e7a3c00c538dfd9d3d02df2f4a271907cd212c
|
[
"BSD-3-Clause"
] | null | null | null |
ewah_bool_utils/__init__.py
|
neutrinoceros/ewah_bool_utils
|
54e7a3c00c538dfd9d3d02df2f4a271907cd212c
|
[
"BSD-3-Clause"
] | null | null | null |
"""Top-level package for EWAH Bool Utils."""
__author__ = """Navaneeth Suresh"""
__email__ = 'navaneeths1998@gmail.com'
__version__ = '0.1.0'
from .ewah_bool_wrap import *
| 21.75
| 44
| 0.718391
|
0eb77234ad47a6dd5dea13b1109681afaca694e4
| 297
|
py
|
Python
|
meridian/tst/acupoints/test_jinmen12.py
|
sinotradition/meridian
|
8c6c1762b204b72346be4bbfb74dedd792ae3024
|
[
"Apache-2.0"
] | 5
|
2015-12-14T15:14:23.000Z
|
2022-02-09T10:15:33.000Z
|
meridian/tst/acupoints/test_jinmen12.py
|
sinotradition/meridian
|
8c6c1762b204b72346be4bbfb74dedd792ae3024
|
[
"Apache-2.0"
] | null | null | null |
meridian/tst/acupoints/test_jinmen12.py
|
sinotradition/meridian
|
8c6c1762b204b72346be4bbfb74dedd792ae3024
|
[
"Apache-2.0"
] | 3
|
2015-11-27T05:23:49.000Z
|
2020-11-28T09:01:56.000Z
|
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
import unittest
from meridian.acupoints import jinmen12
class TestJinmen12Functions(unittest.TestCase):
def setUp(self):
pass
def test_xxx(self):
pass
if __name__ == '__main__':
unittest.main()
| 11.88
| 47
| 0.659933
|
0d9d74f1a6e59a1389ccb3cbf61a17c097debbd0
| 596
|
py
|
Python
|
sphinx_gallery/tests/tinybuild/examples/future/plot_future_imports.py
|
andriyor/sphinx-gallery
|
cc53540162613850c5bb19fa1172a1be960b1484
|
[
"BSD-3-Clause"
] | 309
|
2015-01-18T23:00:29.000Z
|
2022-03-24T15:27:51.000Z
|
sphinx_gallery/tests/tinybuild/examples/future/plot_future_imports.py
|
andriyor/sphinx-gallery
|
cc53540162613850c5bb19fa1172a1be960b1484
|
[
"BSD-3-Clause"
] | 891
|
2015-01-04T19:45:44.000Z
|
2022-03-31T02:36:49.000Z
|
sphinx_gallery/tests/tinybuild/examples/future/plot_future_imports.py
|
andriyor/sphinx-gallery
|
cc53540162613850c5bb19fa1172a1be960b1484
|
[
"BSD-3-Clause"
] | 197
|
2015-01-27T13:14:14.000Z
|
2022-03-28T20:16:39.000Z
|
"""
Test __future__ imports across cells
------------------------------------
This example tests that __future__ imports works across cells.
"""
from __future__ import division
from __future__ import print_function
import matplotlib
####################
# Dummy section, with :func:`sphinx_gallery.backreferences.NameFinder` ref.
assert 3/2 == 1.5
print(3/2, end='')
# testing reset of mpl
orig_dpi = 80. if matplotlib.__version__[0] < '2' else 100.
assert matplotlib.rcParams['figure.dpi'] == orig_dpi
matplotlib.rcParams['figure.dpi'] = 90.
assert matplotlib.rcParams['figure.dpi'] == 90.
| 25.913043
| 75
| 0.684564
|
e226afcedde04a1a49d46925fbb066d93955db0e
| 7,012
|
py
|
Python
|
src/pyFun/main.py
|
hardikparwana/segway_sim
|
792c8ed9e6e26e3e28e5f120be6822f178f17bf2
|
[
"MIT"
] | 10
|
2020-10-08T03:16:25.000Z
|
2021-10-19T02:58:53.000Z
|
src/pyFun/main.py
|
hardikparwana/segway_sim
|
792c8ed9e6e26e3e28e5f120be6822f178f17bf2
|
[
"MIT"
] | null | null | null |
src/pyFun/main.py
|
hardikparwana/segway_sim
|
792c8ed9e6e26e3e28e5f120be6822f178f17bf2
|
[
"MIT"
] | 5
|
2020-10-07T22:14:12.000Z
|
2022-02-21T01:22:21.000Z
|
import sys
import pdb
import scipy.io as sio
import numpy as np
import pickle
from utils import *
from MOMDP import MOMDP, MOMDP_TOQ, MOMDP_TO, MOMDP_Q
import os
def main():
load = 1 # 0 = compute policy and save policy object, 1 = load policy object, -1 = compute policy but DO NOT save it
digitsResults = 6 # number of digits to print, just for visual output
printLevel = 3 # 0 = only value function update and results, 1 = minimal, 2 = verbose
# Evaluate single policy. Details in evaluateSinglePolicy() function
discOpt = 1
evaluateSinglePolicy(load, digitsResults, printLevel, discOpt)
# # Save .txt file with results for Table I
# discOpt = 1 # 1 = 2^{n_u} + n_u belief points, 2 = 2(2^{n_u}) belief points
# gridWorldList = ['5x5' , '10x5', '15x15' ]
# obstList = [3, 4]
# policyList = ['TO', 'Q', 'TOQ']
# evaluateAllPolicies(load, digitsResults,printLevel, discOpt, gridWorldList, obstList, policyList)
# # Save .txt file with results for Table II
# discOpt = 2 # 1 = 2^{n_u} + n_u belief points, 2 = 2(2^{n_u}) belief points
# gridWorldList = ['15x15' ]
# obstList = [4]
# evaluateAllPolicies(load, digitsResults,printLevel, discOpt, gridWorldList, obstList, policyList)
def evaluateSinglePolicy(load, digitsResults, printLevel, discOpt):
# gridWorld = '5x5'
# numObst = 3
# policy = 'TOQ'
# momdp = getMOMDP(gridWorld, numObst, policy, printLevel, load, discOpt, unGoal = False)
gridWorld = '8x8ug'
numObst = 2
policy = 'Q'
momdp = getMOMDP(gridWorld, numObst, policy, printLevel, load, discOpt, unGoal = True)
# # Evaluate expected cost and probability of failure
# results = runAllSim(momdp, gridWorld, numObst, policy, printLevel, digitsResults)
# Run a simulation for an environment realization which is defined in the function loadParameters() from the file utils.py
results = runSim(momdp, gridWorld, numObst, policy, printLevel, digitsResults)
def evaluateAllPolicies(load, digitsResults, printLevel, discOpt, gridWorldList, obstList, policyList):
resultsList = []
for gridWorld in gridWorldList:
for numObst in obstList:
for policy in policyList:
momdp = getMOMDP(gridWorld, numObst, policy, printLevel, load, discOpt)
results = runAllSim(momdp, gridWorld, numObst, policy, printLevel, digitsResults)
resultsList.append(results)
# Save and print to screen the results
print("====== Results for expected cost and prob sat specs")
fileToWrite = open("result_expected_cost_Table_"+str(discOpt)+".txt","w")
for i in range(0, len(resultsList)):
print(resultsList[i][0])
fileToWrite.writelines(resultsList[i][0]+'\n')
fileToWrite.close() #to change file access modes
fileToWrite = open("result_expected_time_Table_"+str(discOpt)+".txt","w")
print("====== Results for expected time and prob of failure")
for i in range(0, len(resultsList)):
print(resultsList[i][1])
fileToWrite.writelines(resultsList[i][1]+'\n')
fileToWrite.close() #to change file access modes
def getMOMDP(gridWorld, numObst, policy, printLevel, load, discOpt, unGoal = False, valFunFlag = True):
totTimeSteps, _, _ = loadParameters(gridWorld, numObst, unGoal)
if unGoal == False:
directory = 'data/'+policy+'_'+str(discOpt)+'/'
fileName = 'MOMDP_obj_'+gridWorld+'_'+str(numObst)+'.pkl'
else:
directory = 'data/'+policy+'ug_'+str(discOpt)+'/'
fileName = 'MOMDP_obj_'+gridWorld+'_'+str(numObst)+'.pkl'
if not os.path.exists(directory):
os.makedirs(directory)
if load <= 0: # If load <= 0 compute the policy and store it if load == 0
gridVar = loadGrid(gridWorld+'_'+str(numObst))
if policy == 'TOQ':
# momdp = MOMDP_TOQ_notVectorized(gridVar, totTimeSteps,printLevel, policy, discOpt)
momdp = MOMDP_TOQ(gridVar, totTimeSteps,printLevel, policy, discOpt, unGoal, valFunFlag)
elif policy == 'Q':
momdp = MOMDP_Q(gridVar, totTimeSteps,printLevel, policy, discOpt, unGoal, valFunFlag)
elif policy == 'TO':
momdp = MOMDP_TO(gridVar, totTimeSteps,printLevel, policy, discOpt, unGoal, valFunFlag)
if load == 0:
pickle_out = open(directory+fileName,"wb")
pickle.dump(momdp, pickle_out)
pickle_out.close()
else:
pickle_in = open(directory+fileName,"rb")
momdp = pickle.load(pickle_in)
return momdp
def runSim(momdp, gridWorld, numObst, policy, printLevel, digits):
totTimeSteps, loc, initBelief = loadParameters(gridWorld, numObst, momdp.unGoal)
bt = [momdp.initBelief(initBelief)] # initial belief
xt = [0] # initial state
V_t0, Vrealized, J_t0, failure,tRun, xt, bt = eveluateMOMDP(momdp, loc, initBelief, xt, bt, printLevel)
plotFun(momdp, xt, bt)
# Print and store results
print("================ Final Results for ", policy, " Policy in ", gridWorld, " grid world with ", numObst, "obstacles.")
print("Vrealized: ", Vrealized, " and V0: ", V_t0)
print("Prob Sat Spec: ", 1 - failure, " and lower bound: ", J_t0)
print("Time: ", tRun)
print("Policy synthesis time: ", momdp.totTime, " Avg backup time: ", momdp.avgBackupTime)
def runAllSim(momdp, gridWorld, numObst, policy, printLevel, digits):
totTimeSteps, _, initBelief = loadParameters(gridWorld, numObst, momdp.unGoal)
probFailure = 0
expCost = 0
expTime = 0
for i in range(0,len(momdp.comb)): # loop over all possible 2^numObst obstacle configurations
loc = momdp.comb[i] # initialize true obstacle location
bt = [momdp.initBelief(initBelief)] # initial belief
xt = [0] # initial state
V_t0, Vrealized, J_t0, failure,tRun, _, _ = eveluateMOMDP(momdp, loc, initBelief, xt, bt, printLevel)
probFailure += failure*bt[0][i] # add prob failure times probability of this scenario
expCost += Vrealized*bt[0][i] # add cost times probability of this scenario
expTime += tRun*bt[0][i]*(failure==0)
# Print and store results
print("================ Final Results for ", policy, " Policy in ", gridWorld, " grid world with ", numObst, "obstacles.")
print("Expected cost: ", expCost, " and V0: ", V_t0)
print("Prob Sat Spec: ", 1 - probFailure, " and lower bound: ", J_t0)
print("Expected time: ", expTime)
print("Prob Failure: ", probFailure, " and upper bound: ", 1-J_t0)
print("Policy synthesis time: ", momdp.totTime, " Avg backup time: ", momdp.avgBackupTime)
if policy == 'TO': policy = 'TO '
if policy == 'Q': policy = 'Q '
stringTest = policy+"_"+gridWorld+"_"+str(numObst)
stringCost = " || ExpCost: "+str(round(expCost,digits))+", V0: "+str(round(V_t0,digits))
stringProb = " || P specs: "+str(round(1-probFailure,digits))+", J0: "+str(round(J_t0,digits))
stringTime = " || Tot Time: "+str(round(momdp.totTime,digits))+", backup time: "+str(round(momdp.avgBackupTime,digits))
stringExpT = " || ExpTime: "+str(round(expTime,digits))
stringFail = " || P fail: "+str(round(probFailure,digits))+" and upper-bound 1-J0: "+str(round(1-J_t0,digits))
return [stringTest+stringCost+stringProb+stringTime, stringTest+stringExpT+stringFail+stringTime]
if __name__ == "__main__":
# execute only if run as a script
main()
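# --- Illustrative sketch (not part of the original script) ---
# The aggregation in runAllSim() is a probability-weighted average over the
# possible obstacle configurations. With made-up numbers: two scenarios with
# prior probabilities 0.75 and 0.25, realized costs 4 and 8, and a failure only
# in the second scenario give an expected cost of 0.75*4 + 0.25*8 = 5 and a
# failure probability of 0.25.
def _expected_cost_sketch():
    scenarios = [
        {"prob": 0.75, "cost": 4.0, "failed": False},
        {"prob": 0.25, "cost": 8.0, "failed": True},
    ]
    exp_cost = sum(s["prob"] * s["cost"] for s in scenarios)
    prob_failure = sum(s["prob"] for s in scenarios if s["failed"])
    assert abs(exp_cost - 5.0) < 1e-9
    assert abs(prob_failure - 0.25) < 1e-9
    return exp_cost, prob_failure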
| 43.283951
| 126
| 0.700656
|
fc66218f6e4aadb48cbabc589260e2859e1f6ff6
| 409
|
py
|
Python
|
selenium/zadania/zadanie_1.py
|
lblaszkowski/jaktestowac
|
e8f0af228792a5a2cfa7c7845c9e70869a275a5b
|
[
"Apache-2.0"
] | null | null | null |
selenium/zadania/zadanie_1.py
|
lblaszkowski/jaktestowac
|
e8f0af228792a5a2cfa7c7845c9e70869a275a5b
|
[
"Apache-2.0"
] | null | null | null |
selenium/zadania/zadanie_1.py
|
lblaszkowski/jaktestowac
|
e8f0af228792a5a2cfa7c7845c9e70869a275a5b
|
[
"Apache-2.0"
] | null | null | null |
# The recruiter wants to check that you can do more than just copy code from a tutorial, so they ask you
# to test the website randomly picked by https://www.discuvver.com/ (click Take me to a useful website!).
from selenium import webdriver
driver = webdriver.Chrome()
driver.get('http://e.ggtimer.com/')
title = driver.title
print(title)
assert title == "E.gg Timer - a simple countdown timer"
driver.close()
| 34.083333
| 110
| 0.762836
|
e3f62008a5ec6599b27eff3201c75d80ba919a56
| 791
|
py
|
Python
|
client/sources/scheme_test/__init__.py
|
akshitdewan/ok-client
|
3c5eca17100eed808023a815654cfe1c95179080
|
[
"Apache-2.0"
] | 30
|
2018-07-10T17:32:49.000Z
|
2022-01-03T16:50:56.000Z
|
client/sources/scheme_test/__init__.py
|
akshitdewan/ok-client
|
3c5eca17100eed808023a815654cfe1c95179080
|
[
"Apache-2.0"
] | 62
|
2018-08-07T18:43:33.000Z
|
2022-02-17T20:53:03.000Z
|
client/sources/scheme_test/__init__.py
|
akshitdewan/ok-client
|
3c5eca17100eed808023a815654cfe1c95179080
|
[
"Apache-2.0"
] | 26
|
2018-11-13T22:12:47.000Z
|
2022-03-20T00:42:26.000Z
|
from client import exceptions as ex
from client.sources.scheme_test import models
import os
def load(file, _, assign):
"""Loads Scheme tests from a specified filepath.
PARAMETERS:
file -- str; a filepath to a Scheme file.
RETURNS:
Test
"""
if not os.path.isfile(file) or not file.endswith('.scm'):
raise ex.LoadingException('Cannot run Scheme tests from {}'.format(file))
with open(file, 'r') as f:
file_contents = f.read()
try:
return {file: models.SchemeTest(file, file_contents, assign.cmd_args.timeout,
name=file, points=1)}
except ex.SerializeException:
raise ex.LoadingException('Unable to load Scheme test '
'from {}'.format(file))
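# --- Illustrative sketch (not part of ok-client) ---
# Shows how the loader above expects to be called. The `assign` stub only
# carries the one attribute the loader actually reads (cmd_args.timeout); in
# real use the Assignment object comes from ok-client itself, and `path` must
# point at an existing .scm file or a LoadingException is raised.
def _example_load_call(path='hw01.scm'):
    from types import SimpleNamespace
    assign = SimpleNamespace(cmd_args=SimpleNamespace(timeout=10))
    return load(path, None, assign)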
| 29.296296
| 85
| 0.610619
|
228bd0d2dc82d566181dc14633ce494804d50e6e
| 3,507
|
py
|
Python
|
tests/data/test_topology.py
|
vasp-dev/py4vasp
|
576c5c23d740b11687e37b6dd332165bc339cf16
|
[
"Apache-2.0"
] | 27
|
2022-02-18T18:43:17.000Z
|
2022-03-29T22:05:41.000Z
|
tests/data/test_topology.py
|
vasp-dev/py4vasp
|
576c5c23d740b11687e37b6dd332165bc339cf16
|
[
"Apache-2.0"
] | 3
|
2022-02-18T18:52:33.000Z
|
2022-03-28T13:26:59.000Z
|
tests/data/test_topology.py
|
vasp-dev/py4vasp
|
576c5c23d740b11687e37b6dd332165bc339cf16
|
[
"Apache-2.0"
] | 4
|
2022-02-07T12:21:23.000Z
|
2022-03-13T21:36:03.000Z
|
# Copyright © VASP Software GmbH,
# Licensed under the Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from py4vasp.data import Topology
from py4vasp.data._selection import Selection
import py4vasp.exceptions as exception
import py4vasp._util.selection as selection
import pytest
import numpy as np
import pandas as pd
@pytest.fixture
def Sr2TiO4(raw_data):
return Topology(raw_data.topology("Sr2TiO4"))
def test_read(Sr2TiO4):
topology = Sr2TiO4.read()
assert topology["Sr"] == Selection(indices=slice(0, 2), label="Sr")
assert topology["Ti"] == Selection(indices=slice(2, 3), label="Ti")
assert topology["O"] == Selection(indices=slice(3, 7), label="O")
assert topology["1"] == Selection(indices=slice(0, 1), label="Sr_1")
assert topology["2"] == Selection(indices=slice(1, 2), label="Sr_2")
assert topology["3"] == Selection(indices=slice(2, 3), label="Ti_1")
assert topology["4"] == Selection(indices=slice(3, 4), label="O_1")
assert topology["5"] == Selection(indices=slice(4, 5), label="O_2")
assert topology["6"] == Selection(indices=slice(5, 6), label="O_3")
assert topology["7"] == Selection(indices=slice(6, 7), label="O_4")
assert topology[selection.all] == Selection(indices=slice(7))
def test_to_frame(Sr2TiO4):
actual = Sr2TiO4.to_frame()
ref_data = {
"name": ("Sr_1", "Sr_2", "Ti_1", "O_1", "O_2", "O_3", "O_4"),
"element": 2 * ("Sr",) + ("Ti",) + 4 * ("O",),
}
reference = pd.DataFrame(ref_data)
assert reference.equals(actual)
def test_to_mdtraj(Sr2TiO4):
actual, _ = Sr2TiO4.to_mdtraj().to_dataframe()
num_atoms = Sr2TiO4.number_atoms()
ref_data = {
"serial": num_atoms * (None,),
"name": ("Sr_1", "Sr_2", "Ti_1", "O_1", "O_2", "O_3", "O_4"),
"element": 2 * ("Sr",) + ("Ti",) + 4 * ("O",),
"resSeq": num_atoms * (0,),
"resName": num_atoms * ("crystal",),
"chainID": num_atoms * (0,),
"segmentID": num_atoms * ("",),
}
reference = pd.DataFrame(ref_data)
assert reference.equals(actual)
def test_to_poscar(Sr2TiO4):
assert Sr2TiO4.to_poscar() == "Sr Ti O\n2 1 4"
assert Sr2TiO4.to_poscar(".format.") == "Sr Ti O.format.\n2 1 4"
with pytest.raises(exception.IncorrectUsage):
Sr2TiO4.to_poscar(None)
def test_elements(Sr2TiO4):
assert Sr2TiO4.elements() == ["Sr", "Sr", "Ti", "O", "O", "O", "O"]
def test_ion_types(Sr2TiO4):
assert Sr2TiO4.ion_types() == ["Sr", "Ti", "O"]
def test_names(Sr2TiO4):
assert Sr2TiO4.names() == ["Sr_1", "Sr_2", "Ti_1", "O_1", "O_2", "O_3", "O_4"]
def test_number_atoms(Sr2TiO4):
assert Sr2TiO4.number_atoms() == 7
def test_print(Sr2TiO4, format_):
actual, _ = format_(Sr2TiO4)
reference = {"text/plain": "Sr2TiO4", "text/html": "Sr<sub>2</sub>TiO<sub>4</sub>"}
assert actual == reference
def test_descriptor(Sr2TiO4, check_descriptors):
descriptors = {
"_to_dict": ["to_dict", "read"],
"_to_frame": ["to_frame"],
"_to_poscar": ["to_poscar"],
"_to_mdtraj": ["to_mdtraj"],
"_elements": ["elements"],
"_ion_types": ["ion_types"],
"_names": ["names"],
"_number_atoms": ["number_atoms"],
}
check_descriptors(Sr2TiO4, descriptors)
def test_from_file(raw_data, mock_file, check_read):
raw_topology = raw_data.topology("Sr2TiO4")
with mock_file("topology", raw_topology) as mocks:
check_read(Topology, mocks, raw_topology)
| 33.4
| 87
| 0.629598
|
4ac7b77e5fbd5ae28e5ce8671da533db0767e31e
| 13,603
|
py
|
Python
|
hubtraf/user.py
|
dfeddema/hubtraf
|
379ddc1fd747d9a6658965462da458c571d4aef8
|
[
"BSD-3-Clause"
] | null | null | null |
hubtraf/user.py
|
dfeddema/hubtraf
|
379ddc1fd747d9a6658965462da458c571d4aef8
|
[
"BSD-3-Clause"
] | null | null | null |
hubtraf/user.py
|
dfeddema/hubtraf
|
379ddc1fd747d9a6658965462da458c571d4aef8
|
[
"BSD-3-Clause"
] | null | null | null |
from enum import Enum, auto
import aiohttp
import socket
import uuid
import random
from yarl import URL
import asyncio
import async_timeout
import structlog
import time
import colorama
logger = structlog.get_logger()
class User:
class States(Enum):
CLEAR = 1
LOGGED_IN = 2
SERVER_STARTED = 3
KERNEL_STARTED = 4
async def __aenter__(self):
self.session = aiohttp.ClientSession()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.session.close()
def __init__(self, username, hub_url, login_handler):
"""
A simulated JupyterHub user.
username - name of the user.
hub_url - base url of the hub.
login_handler - a awaitable callable that will be passed the following parameters:
username
session (aiohttp session object)
log (structlog log object)
hub_url (yarl URL object)
It should 'log in' the user with whatever requests it needs to
perform. If no uncaught exception is thrown, login is considered
a success.
Usually a partial of a generic function is passed in here.
"""
self.username = username
self.hub_url = URL(hub_url)
self.state = User.States.CLEAR
self.notebook_url = self.hub_url / 'user' / self.username
print("self.notebook_url= ", self.notebook_url)
self.log = logger.bind(
username=username
)
self.login_handler = login_handler
self.headers = {
'Referer': str(self.hub_url / 'hub/')
}
def success(self, kind, **kwargs):
kwargs_pretty = " ".join([f"{k}:{v}" for k, v in kwargs.items()])
print(f'{colorama.Fore.GREEN}Success:{colorama.Style.RESET_ALL}', kind, self.username, kwargs_pretty)
def failure(self, kind, **kwargs):
kwargs_pretty = " ".join([f"{k}:{v}" for k, v in kwargs.items()])
print(f'{colorama.Fore.RED}Failure:{colorama.Style.RESET_ALL}', kind, self.username, kwargs_pretty)
def debug(self, kind, **kwargs):
kwargs_pretty = " ".join([f"{k}:{v}" for k, v in kwargs.items()])
print(f'{colorama.Fore.YELLOW}Debug:{colorama.Style.RESET_ALL}', kind, self.username, kwargs_pretty)
async def login(self):
"""
Log in to the JupyterHub.
We only log in and try not to start the server itself. This
makes our testing code simpler, but we need to be aware that this
might cause differences from how users normally interact with the hub.
"""
# We only log in if we haven't done anything already!
assert self.state == User.States.CLEAR
start_time = time.monotonic()
logged_in = await self.login_handler(log=self.log, hub_url=self.hub_url, session=self.session, username=self.username)
if not logged_in:
return False
hub_cookie = self.session.cookie_jar.filter_cookies(self.hub_url).get('hub', None)
if hub_cookie:
self.log = self.log.bind(hub=hub_cookie.value)
self.success('login', duration=time.monotonic() - start_time)
self.state = User.States.LOGGED_IN
return True
async def ensure_server_api(self, api_token, timeout=300, spawn_refresh_time=30):
api_url = self.hub_url / 'hub/api'
self.headers['Authorization'] = f'token {api_token}'
async def server_running():
async with self.session.get(api_url / 'users' / self.username, headers=self.headers) as resp:
userinfo = await resp.json()
# userinfo = await resp.json(content_type=None)
server = userinfo.get('servers', {}).get('', {})
self.debug('server-start', phase='waiting', ready=server.get('ready'), pending=server.get('pending'))
return server.get('ready', False)
self.debug('server-start', phase='start')
start_time = time.monotonic()
async with self.session.post(api_url / 'users' / self.username / 'server', headers=self.headers) as resp:
if resp.status == 201:
# Server created
# FIXME: Verify this server is actually up
self.success('server-start', duration=time.monotonic() - start_time)
self.state = User.States.SERVER_STARTED
return True
elif resp.status == 202:
# Server start request received, not necessarily started
# FIXME: Verify somehow?
self.debug('server-start', phase='waiting')
while not (await server_running()):
await asyncio.sleep(0.5)
self.success('server-start', duration=time.monotonic() - start_time)
self.state = User.States.SERVER_STARTED
return True
elif resp.status == 400:
body = await resp.json()
if body['message'] == f'{self.username} is already running':
self.state = User.States.SERVER_STARTED
return True
print(await resp.json())
print(resp.request_info)
return False
async def ensure_server_simulate(self, timeout=300, spawn_refresh_time=30):
assert self.state == User.States.LOGGED_IN
start_time = time.monotonic()
self.debug('server-start', phase='start')
i = 0
while True:
i += 1
self.debug('server-start', phase='attempt-start', attempt=i + 1)
try:
resp = await self.session.get(self.hub_url / 'hub/spawn')
except Exception as e:
self.debug('server-start', exception=str(e), attempt=i + 1, phase='attempt-failed', duration=time.monotonic() - start_time)
continue
# Check if paths match, ignoring query string (primarily, redirects=N), fragments
target_url_tree = self.notebook_url / 'tree'
if resp.url.scheme == target_url_tree.scheme and resp.url.host == target_url_tree.host and resp.url.path == target_url_tree.path:
self.success('server-start', phase='complete', attempt=i + 1, duration=time.monotonic() - start_time)
break
target_url_lab = self.notebook_url / 'lab'
if resp.url.scheme == target_url_lab.scheme and resp.url.host == target_url_lab.host and resp.url.path == target_url_lab.path:
self.success('server-start', phase='complete', attempt=i + 1, duration=time.monotonic() - start_time)
break
if time.monotonic() - start_time >= timeout:
self.failure('server-start', phase='failed', duration=time.monotonic() - start_time, reason='timeout')
return False
# Always log retries, so we can count 'in-progress' actions
self.debug('server-start', resp=str(resp), phase='attempt-complete', duration=time.monotonic() - start_time, attempt=i + 1)
# FIXME: Add jitter?
await asyncio.sleep(random.uniform(0, spawn_refresh_time))
self.state = User.States.SERVER_STARTED
self.headers['X-XSRFToken'] = self.xsrf_token
return True
async def stop_server(self):
#assert self.state == User.States.SERVER_STARTED
self.debug('server-stop', phase='start')
start_time = time.monotonic()
try:
resp = await self.session.delete(
self.hub_url / 'hub/api/users' / self.username / 'server',
headers=self.headers
)
except Exception as e:
self.failure('server-stop', exception=str(e), duration=time.monotonic() - start_time)
return False
if resp.status != 202 and resp.status != 204:
self.failure('server-stop', exception=str(resp), duration=time.monotonic() - start_time)
return False
self.success('server-stop', duration=time.monotonic() - start_time)
self.state = User.States.LOGGED_IN
return True
async def start_kernel(self):
assert self.state == User.States.SERVER_STARTED
self.debug('kernel-start', phase='start')
start_time = time.monotonic()
try:
resp = await self.session.post(self.notebook_url / 'api/kernels', headers=self.headers)
except Exception as e:
self.failure('kernel-start', exception=str(e), duration=time.monotonic() - start_time)
return False
if resp.status != 201:
self.failure('kernel-start', exception=str(resp), duration=time.monotonic() - start_time)
return False
self.kernel_id = (await resp.json())['id']
self.success('kernel-start', duration=time.monotonic() - start_time)
self.state = User.States.KERNEL_STARTED
return True
@property
def xsrf_token(self):
notebook_cookies = self.session.cookie_jar.filter_cookies(self.notebook_url)
assert '_xsrf' in notebook_cookies
xsrf_token = notebook_cookies['_xsrf'].value
return xsrf_token
async def stop_kernel(self):
assert self.state == User.States.KERNEL_STARTED
self.debug('kernel-stop', phase='start')
start_time = time.monotonic()
try:
resp = await self.session.delete(self.notebook_url / 'api/kernels' / self.kernel_id, headers=self.headers)
except Exception as e:
self.failure('kernel-stop', exception=str(e), duration=time.monotonic() - start_time)
return False
if resp.status != 204:
self.failure('kernel-stop', exception=str(resp), duration=time.monotonic() - start_time)
return False
self.success('kernel-stop', duration=time.monotonic() - start_time)
self.state = User.States.SERVER_STARTED
return True
def request_execute_code(self, msg_id, code):
return {
"header": {
"msg_id": msg_id,
"username": self.username,
"msg_type": "execute_request",
"version": "5.2"
},
"metadata": {},
"content": {
"code": code,
"silent": False,
"store_history": True,
"user_expressions": {},
"allow_stdin": True,
"stop_on_error": True
},
"buffers": [],
"parent_header": {},
"channel": "shell"
}
async def assert_code_output(self, code, output, execute_timeout, repeat_time_seconds=None):
channel_url = self.notebook_url / 'api/kernels' / self.kernel_id / 'channels'
self.debug('kernel-connect', phase='start')
is_connected = False
try:
async with self.session.ws_connect(channel_url, headers=self.headers) as ws:
is_connected = True
self.debug('kernel-connect', phase='complete')
start_time = time.monotonic()
iteration = 0
self.debug('code-execute', phase='start')
while True:
exec_start_time = time.monotonic()
iteration += 1
msg_id = str(uuid.uuid4())
await ws.send_json(self.request_execute_code(msg_id, code))
async for msg_text in ws:
if msg_text.type != aiohttp.WSMsgType.TEXT:
self.failure(
'code-execute',
iteration=iteration,
message=str(msg_text),
duration=time.monotonic() - exec_start_time
)
return False
msg = msg_text.json()
if 'parent_header' in msg and msg['parent_header'].get('msg_id') == msg_id:
# These are responses to our request
if msg['channel'] == 'iopub':
response = None
if msg['msg_type'] == 'execute_result':
response = msg['content']['data']['text/plain']
elif msg['msg_type'] == 'stream':
response = msg['content']['text']
if response:
assert response == output
duration = time.monotonic() - exec_start_time
break
if repeat_time_seconds:
if time.monotonic() - start_time >= repeat_time_seconds:
break
else:
# Sleep a random amount of time between 0 and 1s, so we aren't busylooping
await asyncio.sleep(random.uniform(0, 1))
continue
else:
break
self.success(
'code-execute',
duration=duration, iteration=iteration
)
return True
except Exception as e:
self.failure('code-execute', exception=str(e))
return False
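# --- Illustrative sketch (not part of hubtraf) ---
# A minimal login_handler matching the contract described in User.__init__:
# an awaitable that receives username, session, log and hub_url and returns a
# truthy value on success. The '/hub/login' form route and the fixed password
# are assumptions for demonstration only; real runs usually pass a
# functools.partial over a handler written for the deployment's authenticator.
async def example_login_handler(username, session, log, hub_url):
    try:
        resp = await session.post(
            hub_url / 'hub/login',  # assumed form-login route
            data={'username': username, 'password': 'dummy-password'},
        )
    except Exception as e:
        log.info('login-failed', exception=str(e))
        return False
    return resp.status in (200, 302)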
| 42.642633
| 141
| 0.556936
|
81cba28dcd0c68cc57e928cf586de92b228bc2a3
| 1,027
|
py
|
Python
|
pytudes/_2021/educative/grokking_the_coding_interview/sliding_window/_1__maximum_sum_subarray_of_size_k__easy.py
|
TeoZosa/pytudes
|
4f01ab20f936bb4b3f42d1946180d4a20fd95fbf
|
[
"Apache-2.0"
] | 1
|
2022-02-08T09:47:35.000Z
|
2022-02-08T09:47:35.000Z
|
pytudes/_2021/educative/grokking_the_coding_interview/sliding_window/_1__maximum_sum_subarray_of_size_k__easy.py
|
TeoZosa/pytudes
|
4f01ab20f936bb4b3f42d1946180d4a20fd95fbf
|
[
"Apache-2.0"
] | 62
|
2021-04-02T23:41:16.000Z
|
2022-03-25T13:16:10.000Z
|
pytudes/_2021/educative/grokking_the_coding_interview/sliding_window/_1__maximum_sum_subarray_of_size_k__easy.py
|
TeoZosa/pytudes
|
4f01ab20f936bb4b3f42d1946180d4a20fd95fbf
|
[
"Apache-2.0"
] | null | null | null |
"""https://www.educative.io/courses/grokking-the-coding-interview/JPKr0kqLGNP
"""
def max_sum_sub_array_of_size_k(arr: list[int], K: int) -> int:
"""
Examples:
>>> max_sum_sub_array_of_size_k([2, 1, 5, 1, 3, 2],3)
9
>>> max_sum_sub_array_of_size_k([2, 3, 4, 1, 5],2)
7
>>> max_sum_sub_array_of_size_k([],2)
0
>>> max_sum_sub_array_of_size_k([1],0)
0
"""
## EDGE CASES ##
if not arr or K <= 0:
return 0
"""ALGORITHM"""
get_curr_win_size = lambda: window_end - window_start + 1
## INITIALIZE VARS ##
max_sum, window_sum = 0, 0
window_start = 0
## SLIDING
for window_end in range(len(arr)):
## EXPANSION
window_sum += arr[window_end] # add the next element
## WINDOW MATCH
if get_curr_win_size() == K:
max_sum = max(max_sum, window_sum)
## CONTRACTION
window_sum -= arr[window_start]
window_start += 1
return max_sum
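# --- Illustrative cross-check (not part of the original solution) ---
# A brute-force O(N*K) version that must agree with the O(N) sliding-window
# implementation above; comparing the two on a few inputs is a quick way to
# convince yourself the expansion/contraction bookkeeping is correct.
def brute_force_max_sum_sub_array_of_size_k(arr: list[int], K: int) -> int:
    if not arr or K <= 0 or K > len(arr):
        return 0
    return max(sum(arr[i:i + K]) for i in range(len(arr) - K + 1))


assert brute_force_max_sum_sub_array_of_size_k([2, 1, 5, 1, 3, 2], 3) == 9
assert brute_force_max_sum_sub_array_of_size_k([2, 3, 4, 1, 5], 2) == 7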
| 24.452381
| 77
| 0.561831
|
9af7f0283f1278f8ba473d09841de13190ec3dc0
| 12,362
|
py
|
Python
|
cinder/volume/targets/iet.py
|
yanheven/cinder
|
89797971f30d547acbf715fea099c52d90966d1f
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/targets/iet.py
|
yanheven/cinder
|
89797971f30d547acbf715fea099c52d90966d1f
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/targets/iet.py
|
yanheven/cinder
|
89797971f30d547acbf715fea099c52d90966d1f
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import stat
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _LI, _LE, _LW
from cinder import utils
from cinder.volume.targets import iscsi
LOG = logging.getLogger(__name__)
class IetAdm(iscsi.ISCSITarget):
VERSION = '0.1'
def __init__(self, *args, **kwargs):
super(IetAdm, self).__init__(*args, **kwargs)
self.iet_conf = self.configuration.safe_get('iet_conf')
self.iscsi_iotype = self.configuration.safe_get('iscsi_iotype')
self.auth_type = 'IncomingUser'
self.iet_sessions = '/proc/net/iet/session'
def _get_target(self, iqn):
# Find existing iSCSI target session from /proc/net/iet/session
#
# tid:2 name:iqn.2010-10.org:volume-222
# sid:562950561399296 initiator:iqn.1994-05.com:5a6894679665
# cid:0 ip:192.168.122.1 state:active hd:none dd:none
# tid:1 name:iqn.2010-10.org:volume-111
# sid:281475567911424 initiator:iqn.1994-05.com:5a6894679665
# cid:0 ip:192.168.122.1 state:active hd:none dd:none
iscsi_target = 0
try:
with open(self.iet_sessions, 'r') as f:
sessions = f.read()
except Exception:
LOG.exception(_LE("Failed to open iet session list for %s"), iqn)
raise
session_list = re.split('^tid:(?m)', sessions)[1:]
for ses in session_list:
m = re.match('(\d+) name:(\S+)\s+', ses)
if m and iqn in m.group(2):
return m.group(1)
return iscsi_target
def _get_iscsi_target(self, context, vol_id):
pass
def _get_target_and_lun(self, context, volume):
# For ietadm dev starts at lun 0
lun = 0
# Using tid 0 makes ietadm search for an empty tid when creating a
# new iSCSI target
iscsi_target = 0
# Find existing iSCSI target based on iqn
iqn = '%svolume-%s' % (self.iscsi_target_prefix, volume['id'])
iscsi_target = self._get_target(iqn)
return iscsi_target, lun
def _get_target_chap_auth(self, context, name):
vol_id = name.split(':')[1]
if os.path.exists(self.iet_conf):
try:
with utils.temporary_chown(self.iet_conf):
with open(self.iet_conf, 'r') as f:
iet_conf_text = f.readlines()
except Exception:
# If we fail to handle config file, raise exception here to
# prevent unexpected behavior during subsequent operations.
LOG.exception(_LE("Failed to open config for %s."), vol_id)
raise
target_found = False
for line in iet_conf_text:
if target_found:
m = re.search('(\w+) (\w+) (\w+)', line)
if m:
return (m.group(2), m.group(3))
else:
LOG.debug("Failed to find CHAP auth from config "
"for %s", vol_id)
return None
elif name in line:
target_found = True
else:
# A missing config file is an unexpected situation, but we will create
# a new config file during create_iscsi_target(). We just warn the
# operator here.
LOG.warn(_LW("Failed to find CHAP auth from config for "
"%(vol_id)s. Config file %(conf)s does not exist."),
{'vol_id': vol_id, 'conf': self.iet_conf})
return None
def create_iscsi_target(self, name, tid, lun, path,
chap_auth=None, **kwargs):
config_auth = None
vol_id = name.split(':')[1]
# Check the target is already existing.
tmp_tid = self._get_target(name)
# Create a new iSCSI target. If a target already exists,
# the command returns 234, but we ignore it.
try:
self._new_target(name, tid)
tid = self._get_target(name)
self._new_logicalunit(tid, lun, path)
if chap_auth is not None:
(username, password) = chap_auth
config_auth = ' '.join((self.auth_type,) + chap_auth)
self._new_auth(tid, self.auth_type, username, password)
except putils.ProcessExecutionError:
LOG.exception(_LE("Failed to create iscsi target for volume "
"id:%s"), vol_id)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
# Update config file only if new scsi target is created.
if not tmp_tid:
self.update_config_file(name, tid, path, config_auth)
return tid
def update_config_file(self, name, tid, path, config_auth):
conf_file = self.iet_conf
vol_id = name.split(':')[1]
# If the config file does not exist, create a blank conf file and
# add the configuration for the volume to the new file.
if not os.path.exists(conf_file):
try:
utils.execute("truncate", conf_file, "--size=0",
run_as_root=True)
except putils.ProcessExecutionError:
LOG.exception(_LE("Failed to create %(conf)s for volume "
"id:%(vol_id)s"),
{'conf': conf_file, 'vol_id': vol_id})
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
try:
volume_conf = """
Target %s
%s
Lun 0 Path=%s,Type=%s
""" % (name, config_auth, path, self._iotype(path))
with utils.temporary_chown(conf_file):
with open(conf_file, 'a+') as f:
f.write(volume_conf)
except Exception:
LOG.exception(_LE("Failed to update %(conf)s for volume "
"id:%(vol_id)s"),
{'conf': conf_file, 'vol_id': vol_id})
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
LOG.info(_LI("Removing iscsi_target for volume: %s"), vol_id)
try:
self._delete_logicalunit(tid, lun)
session_info = self._find_sid_cid_for_target(tid, vol_name, vol_id)
if session_info:
sid, cid = session_info
self._force_delete_target(tid, sid, cid)
self._delete_target(tid)
except putils.ProcessExecutionError:
LOG.exception(_LE("Failed to remove iscsi target for volume "
"id:%s"), vol_id)
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
vol_uuid_file = vol_name
conf_file = self.iet_conf
if os.path.exists(conf_file):
try:
with utils.temporary_chown(conf_file):
with open(conf_file, 'r+') as iet_conf_text:
full_txt = iet_conf_text.readlines()
new_iet_conf_txt = []
count = 0
for line in full_txt:
if count > 0:
count -= 1
continue
elif vol_uuid_file in line:
count = 2
continue
else:
new_iet_conf_txt.append(line)
iet_conf_text.seek(0)
iet_conf_text.truncate(0)
iet_conf_text.writelines(new_iet_conf_txt)
except Exception:
LOG.exception(_LE("Failed to update %(conf)s for volume id "
"%(vol_id) after removing iscsi target"),
{'conf': conf_file, 'vol_id': vol_id})
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
else:
LOG.warn(_LW("Failed to update %(conf)s for volume id %(vol_id) "
"after removing iscsi target. "
"%(conf)s does not exist."),
{'conf': conf_file, 'vol_id': vol_id})
def _find_sid_cid_for_target(self, tid, name, vol_id):
"""Find sid, cid for existing iscsi target"""
try:
with open(self.iet_sessions, 'r') as f:
sessions = f.read()
except Exception as e:
LOG.info(_LI("Failed to open iet session list for "
"%(vol_id)s: %(e)s"),
{'vol_id': vol_id, 'e': e})
return None
session_list = re.split('^tid:(?m)', sessions)[1:]
for ses in session_list:
m = re.match('(\d+) name:(\S+)\s+sid:(\d+).+\s+cid:(\d+)', ses)
if m and tid in m.group(1) and name in m.group(2):
return m.group(3), m.group(4)
def _is_block(self, path):
mode = os.stat(path).st_mode
return stat.S_ISBLK(mode)
def _iotype(self, path):
if self.iscsi_iotype == 'auto':
return 'blockio' if self._is_block(path) else 'fileio'
else:
return self.iscsi_iotype
def _new_target(self, name, tid):
"""Create new scsi target using specified parameters.
If the target already exists, ietadm returns
'Invalid argument' and error code '234'.
This should be ignored for ensure export case.
"""
utils.execute('ietadm', '--op', 'new',
'--tid=%s' % tid,
'--params', 'Name=%s' % name,
run_as_root=True, check_exit_code=[0, 234])
def _delete_target(self, tid):
utils.execute('ietadm', '--op', 'delete',
'--tid=%s' % tid,
run_as_root=True)
def _force_delete_target(self, tid, sid, cid):
utils.execute('ietadm', '--op', 'delete',
'--tid=%s' % tid,
'--sid=%s' % sid,
'--cid=%s' % cid,
run_as_root=True)
def show_target(self, tid, iqn=None):
utils.execute('ietadm', '--op', 'show',
'--tid=%s' % tid,
run_as_root=True)
def _new_logicalunit(self, tid, lun, path):
"""Attach a new volume to scsi target as a logical unit.
If a logical unit exists on the specified target lun,
ietadm returns 'File exists' and error code '239'.
This should be ignored for ensure export case.
"""
utils.execute('ietadm', '--op', 'new',
'--tid=%s' % tid,
'--lun=%d' % lun,
'--params',
'Path=%s,Type=%s' % (path, self._iotype(path)),
run_as_root=True, check_exit_code=[0, 239])
def _delete_logicalunit(self, tid, lun):
utils.execute('ietadm', '--op', 'delete',
'--tid=%s' % tid,
'--lun=%d' % lun,
run_as_root=True)
def _new_auth(self, tid, type, username, password):
utils.execute('ietadm', '--op', 'new',
'--tid=%s' % tid,
'--user',
'--params=%s=%s,Password=%s' % (type,
username,
password),
run_as_root=True)
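# --- Illustrative sketch (not part of Cinder) ---
# Demonstrates how the regexes used by IetAdm._get_target() pick a tid out of
# the /proc/net/iet/session text shown in the comments above. The session text
# here is the same sample; the inline (?m) flag is written at the front of the
# pattern because newer Python versions reject global flags mid-pattern.
def _iet_session_parse_sketch():
    sessions = (
        "tid:2 name:iqn.2010-10.org:volume-222\n"
        "    sid:562950561399296 initiator:iqn.1994-05.com:5a6894679665\n"
        "        cid:0 ip:192.168.122.1 state:active hd:none dd:none\n"
        "tid:1 name:iqn.2010-10.org:volume-111\n"
        "    sid:281475567911424 initiator:iqn.1994-05.com:5a6894679665\n"
        "        cid:0 ip:192.168.122.1 state:active hd:none dd:none\n"
    )
    iqn = 'iqn.2010-10.org:volume-222'
    for ses in re.split(r'(?m)^tid:', sessions)[1:]:
        m = re.match(r'(\d+) name:(\S+)\s+', ses)
        if m and iqn in m.group(2):
            return m.group(1)  # -> '2' for this sample
    return 0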
| 38.996845
| 79
| 0.523783
|
afc7a293ca9c9dd3c95230cef935d914469caa7b
| 14,752
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_ha_eem_policy_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_ha_eem_policy_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_ha_eem_policy_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Eem.DirUser.Library' : {
'meta_info' : _MetaInfoClass('Eem.DirUser.Library',
False,
[
_MetaInfoClassMember('library', ATTRIBUTE, 'str' , None, None,
[], [],
''' library
''',
'library',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('policy', ATTRIBUTE, 'str' , None, None,
[], [],
''' policy
''',
'policy',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
],
'Cisco-IOS-XR-ha-eem-policy-oper',
'library',
_yang_ns._namespaces['Cisco-IOS-XR-ha-eem-policy-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper'
),
},
'Eem.DirUser.Policy' : {
'meta_info' : _MetaInfoClass('Eem.DirUser.Policy',
False,
[
_MetaInfoClassMember('library', ATTRIBUTE, 'str' , None, None,
[], [],
''' library
''',
'library',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('policy', ATTRIBUTE, 'str' , None, None,
[], [],
''' policy
''',
'policy',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
],
'Cisco-IOS-XR-ha-eem-policy-oper',
'policy',
_yang_ns._namespaces['Cisco-IOS-XR-ha-eem-policy-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper'
),
},
'Eem.DirUser' : {
'meta_info' : _MetaInfoClass('Eem.DirUser',
False,
[
_MetaInfoClassMember('library', REFERENCE_CLASS, 'Library' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper', 'Eem.DirUser.Library',
[], [],
''' directory user library
''',
'library',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('policy', REFERENCE_CLASS, 'Policy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper', 'Eem.DirUser.Policy',
[], [],
''' directory user policy
''',
'policy',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
],
'Cisco-IOS-XR-ha-eem-policy-oper',
'dir-user',
_yang_ns._namespaces['Cisco-IOS-XR-ha-eem-policy-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper'
),
},
'Eem.EnvVariables.EnvVariable' : {
'meta_info' : _MetaInfoClass('Eem.EnvVariables.EnvVariable',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Environmental variable name
''',
'name',
'Cisco-IOS-XR-ha-eem-policy-oper', True),
_MetaInfoClassMember('name-xr', ATTRIBUTE, 'str' , None, None,
[], [],
''' variable name
''',
'name_xr',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'str' , None, None,
[], [],
''' value
''',
'value',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
],
'Cisco-IOS-XR-ha-eem-policy-oper',
'env-variable',
_yang_ns._namespaces['Cisco-IOS-XR-ha-eem-policy-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper'
),
},
'Eem.EnvVariables' : {
'meta_info' : _MetaInfoClass('Eem.EnvVariables',
False,
[
_MetaInfoClassMember('env-variable', REFERENCE_LIST, 'EnvVariable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper', 'Eem.EnvVariables.EnvVariable',
[], [],
''' environmental variables name and value
''',
'env_variable',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
],
'Cisco-IOS-XR-ha-eem-policy-oper',
'env-variables',
_yang_ns._namespaces['Cisco-IOS-XR-ha-eem-policy-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper'
),
},
'Eem.RefreshTime' : {
'meta_info' : _MetaInfoClass('Eem.RefreshTime',
False,
[
_MetaInfoClassMember('refreshtime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Event manager refresh-time
''',
'refreshtime',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
],
'Cisco-IOS-XR-ha-eem-policy-oper',
'refresh-time',
_yang_ns._namespaces['Cisco-IOS-XR-ha-eem-policy-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper'
),
},
'Eem.RegPolicies.RegPolicy' : {
'meta_info' : _MetaInfoClass('Eem.RegPolicies.RegPolicy',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' policy name
''',
'name',
'Cisco-IOS-XR-ha-eem-policy-oper', True),
_MetaInfoClassMember('class', ATTRIBUTE, 'str' , None, None,
[], [],
''' class
''',
'class_',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[], [],
''' description
''',
'description',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('event-type', ATTRIBUTE, 'str' , None, None,
[], [],
''' event type
''',
'event_type',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('persist-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' PersistTime
''',
'persist_time',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' policy name
''',
'policy_name',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('time-created', ATTRIBUTE, 'str' , None, None,
[], [],
''' time created
''',
'time_created',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('trap', ATTRIBUTE, 'str' , None, None,
[], [],
''' trap
''',
'trap',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('type', ATTRIBUTE, 'str' , None, None,
[], [],
''' policy type
''',
'type',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('username', ATTRIBUTE, 'str' , None, None,
[], [],
''' username
''',
'username',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
],
'Cisco-IOS-XR-ha-eem-policy-oper',
'reg-policy',
_yang_ns._namespaces['Cisco-IOS-XR-ha-eem-policy-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper'
),
},
'Eem.RegPolicies' : {
'meta_info' : _MetaInfoClass('Eem.RegPolicies',
False,
[
_MetaInfoClassMember('reg-policy', REFERENCE_LIST, 'RegPolicy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper', 'Eem.RegPolicies.RegPolicy',
[], [],
''' policy name and create time
''',
'reg_policy',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
],
'Cisco-IOS-XR-ha-eem-policy-oper',
'reg-policies',
_yang_ns._namespaces['Cisco-IOS-XR-ha-eem-policy-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper'
),
},
'Eem.AvlPolicies.AvlPolicy' : {
'meta_info' : _MetaInfoClass('Eem.AvlPolicies.AvlPolicy',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' System policy name
''',
'name',
'Cisco-IOS-XR-ha-eem-policy-oper', True),
_MetaInfoClassMember('policy-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' policy name
''',
'policy_name',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('time-created', ATTRIBUTE, 'str' , None, None,
[], [],
''' time created
''',
'time_created',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('type', ATTRIBUTE, 'str' , None, None,
[], [],
''' policy type
''',
'type',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
],
'Cisco-IOS-XR-ha-eem-policy-oper',
'avl-policy',
_yang_ns._namespaces['Cisco-IOS-XR-ha-eem-policy-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper'
),
},
'Eem.AvlPolicies' : {
'meta_info' : _MetaInfoClass('Eem.AvlPolicies',
False,
[
_MetaInfoClassMember('avl-policy', REFERENCE_LIST, 'AvlPolicy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper', 'Eem.AvlPolicies.AvlPolicy',
[], [],
''' policy name and create time
''',
'avl_policy',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
],
'Cisco-IOS-XR-ha-eem-policy-oper',
'avl-policies',
_yang_ns._namespaces['Cisco-IOS-XR-ha-eem-policy-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper'
),
},
'Eem' : {
'meta_info' : _MetaInfoClass('Eem',
False,
[
_MetaInfoClassMember('avl-policies', REFERENCE_CLASS, 'AvlPolicies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper', 'Eem.AvlPolicies',
[], [],
''' list the available policies
''',
'avl_policies',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('dir-user', REFERENCE_CLASS, 'DirUser' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper', 'Eem.DirUser',
[], [],
''' directory user
''',
'dir_user',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('env-variables', REFERENCE_CLASS, 'EnvVariables' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper', 'Eem.EnvVariables',
[], [],
''' list of environmental variables
''',
'env_variables',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('refresh-time', REFERENCE_CLASS, 'RefreshTime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper', 'Eem.RefreshTime',
[], [],
''' Refresh time
''',
'refresh_time',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
_MetaInfoClassMember('reg-policies', REFERENCE_CLASS, 'RegPolicies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper', 'Eem.RegPolicies',
[], [],
''' list the registered policies
''',
'reg_policies',
'Cisco-IOS-XR-ha-eem-policy-oper', False),
],
'Cisco-IOS-XR-ha-eem-policy-oper',
'eem',
_yang_ns._namespaces['Cisco-IOS-XR-ha-eem-policy-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ha_eem_policy_oper'
),
},
}
_meta_table['Eem.DirUser.Library']['meta_info'].parent =_meta_table['Eem.DirUser']['meta_info']
_meta_table['Eem.DirUser.Policy']['meta_info'].parent =_meta_table['Eem.DirUser']['meta_info']
_meta_table['Eem.EnvVariables.EnvVariable']['meta_info'].parent =_meta_table['Eem.EnvVariables']['meta_info']
_meta_table['Eem.RegPolicies.RegPolicy']['meta_info'].parent =_meta_table['Eem.RegPolicies']['meta_info']
_meta_table['Eem.AvlPolicies.AvlPolicy']['meta_info'].parent =_meta_table['Eem.AvlPolicies']['meta_info']
_meta_table['Eem.DirUser']['meta_info'].parent =_meta_table['Eem']['meta_info']
_meta_table['Eem.EnvVariables']['meta_info'].parent =_meta_table['Eem']['meta_info']
_meta_table['Eem.RefreshTime']['meta_info'].parent =_meta_table['Eem']['meta_info']
_meta_table['Eem.RegPolicies']['meta_info'].parent =_meta_table['Eem']['meta_info']
_meta_table['Eem.AvlPolicies']['meta_info'].parent =_meta_table['Eem']['meta_info']
| 43.260997
| 183
| 0.487866
|
05e70589e9212165d8740953ce916c8bbafd5fac
| 373
|
py
|
Python
|
tests/api/models.py
|
TralahM/drf-generators
|
0b4b79ae25ddc02aa2f5e9bc9f62be16a9ab028a
|
[
"MIT"
] | 340
|
2015-04-07T20:32:30.000Z
|
2022-03-28T12:54:38.000Z
|
tests/api/models.py
|
TralahM/drf-generators
|
0b4b79ae25ddc02aa2f5e9bc9f62be16a9ab028a
|
[
"MIT"
] | 37
|
2015-04-07T22:56:00.000Z
|
2021-05-19T09:36:47.000Z
|
tests/api/models.py
|
TralahM/drf-generators
|
0b4b79ae25ddc02aa2f5e9bc9f62be16a9ab028a
|
[
"MIT"
] | 82
|
2015-04-11T06:18:12.000Z
|
2022-03-20T18:26:05.000Z
|
from django.db import models
class Category(models.Model):
name = models.CharField(max_length=64)
class Post(models.Model):
title = models.CharField(max_length=128)
slug = models.SlugField(default='', blank=True, max_length=128)
content = models.TextField()
category = models.ForeignKey('Category', blank=True, null=True, on_delete=models.SET_NULL)
| 28.692308
| 94
| 0.734584
|
073f8c97213afe4a0b568ebba42d3cd2cfdc6b74
| 14,304
|
py
|
Python
|
madoka/report/renderer/tree.py
|
korepwx/madoka
|
56675bd8220935c6a9c1571a886a84bed235fd3b
|
[
"MIT"
] | null | null | null |
madoka/report/renderer/tree.py
|
korepwx/madoka
|
56675bd8220935c6a9c1571a886a84bed235fd3b
|
[
"MIT"
] | null | null | null |
madoka/report/renderer/tree.py
|
korepwx/madoka
|
56675bd8220935c6a9c1571a886a84bed235fd3b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import io
import mimetypes
import os
from PIL import Image
from .base import ReportRenderer, ATTACHMENT_CONTENT_TYPE
from .resources import ReportResourcesManager
__all__ = [
'ReportElementType', 'ReportElement', 'ReportBlock',
'ReportText', 'ReportInternalLink', 'ReportImage', 'ReportAttachment',
'ReportDataFrame', 'ReportTreeTOCNode', 'TreeReportRenderer',
]
class ReportElementType:
"""Enum for indicating the element type."""
BLOCK = 'block'
ROOT = 'root'
TEXT = 'text'
INTERNAL_LINK = 'internal_link'
HTML = 'html'
IMAGE = 'image'
ATTACHMENT = 'attachment'
DATA_FRAME = 'data_frame'
class ReportElement(object):
"""Base class for all rendered element of a report.
Parameters
----------
anchor : str
Anchor name of this element.
If specified, this element can be referenced by internal links.
"""
element_type = None
_repr_attributes = ()
def __init__(self, anchor=None):
self.anchor = anchor
def __repr__(self):
attributes = []
for k in self._repr_attributes:
v = getattr(self, k, None)
if v is not None:
v = repr(v)
if len(v) > 20:
v = '%s..%s' % (v[:10], v[-8:])
attributes.append('%s=%s' % (k, v))
if self.anchor:
attributes.append('anchor=%r' % (self.anchor,))
if attributes:
ret = '%s(%s)' % (self.__class__.__name__, ','.join(attributes))
else:
ret = self.__class__.__name__
return ret
class ReportBlock(ReportElement):
"""A rendered block of a report.
Parameters
----------
title : str
Title of this block.
report : madoka.report.Report
The report object, if it is a report block.
children : collections.Iterable[ReportElement]
Child elements of this block.
anchor : str
Anchor name of this element.
If specified, this element can be referenced by internal links.
"""
element_type = ReportElementType.BLOCK
def __init__(self, title, report=None, children=None, anchor=None):
super(ReportBlock, self).__init__(anchor=anchor)
self.title = title
self.report = report
self.items = list(children) if children else []
def __iter__(self):
return iter(self.items)
def __len__(self):
return len(self.items)
def __repr__(self):
if self.items:
c = '\n'.join(' ' + line for line in
',\n'.join(repr(i) for i in self.items).split('\n'))
if self.title:
ret = 'ReportBlock(title=%r,\n%s\n)' % (self.title, c)
else:
ret = 'ReportBlock(\n%s\n)' % c
else:
ret = 'ReportBlock(title=%r)' % self.title
return ret
def add(self, element):
"""Add a report element to this block."""
self.items.append(element)
class ReportRoot(ReportBlock):
"""Root report block."""
element_type = ReportElementType.ROOT
def __init__(self, title, exp_id, children=None, anchor=None):
super(ReportRoot, self).__init__(title=title, children=children,
anchor=anchor)
self.exp_id = exp_id
class ReportText(ReportElement):
"""A rendered text of a report.
Parameters
----------
text : str
The report text.
anchor : str
Anchor name of this element.
If specified, this element can be referenced by internal links.
"""
element_type = ReportElementType.TEXT
_repr_attributes = ('text',)
def __init__(self, text, anchor=None):
super(ReportText, self).__init__(anchor=anchor)
self.text = text
class ReportInternalLink(ReportElement):
"""A rendered internal link of a report.
Parameters
----------
text : str
The link text.
target : str
The target anchor.
"""
element_type = ReportElementType.INTERNAL_LINK
_repr_attributes = ('text', 'target')
def __init__(self, text, target):
super(ReportInternalLink, self).__init__()
self.text = text
self.target = target
class ReportHTML(ReportElement):
"""A rendered HTML of a report.
Parameters
----------
source : str
The report HTML source.
anchor : str
Anchor name of this element.
If specified, this element can be referenced by internal links.
"""
element_type = ReportElementType.HTML
_repr_attributes = ('source',)
def __init__(self, source, anchor=None):
super(ReportHTML, self).__init__(anchor=anchor)
self.source = source
class _ReportFile(ReportElement):
"""A rendered file of a report.
Parameters
----------
data: bytes
Binary content of this file.
title : str
Title of this file.
filename : str
File name of this file.
extension : str
Extension of this file.
content_type : str
Mime type of this file.
anchor : str
Anchor name of this element.
If specified, this element can be referenced by internal links.
"""
_repr_attributes = ('title', 'filename', 'extension', 'content_type')
def __init__(self, data, title=None, filename=None, extension=None,
content_type=None, anchor=None):
super(_ReportFile, self).__init__(anchor=anchor)
if extension is None:
if filename is not None:
extension = os.path.splitext(filename)[1]
else:
extension = mimetypes.guess_extension(content_type)
if extension is None:
raise RuntimeError('Unknown mime type %r.' % content_type)
self.data = data
self.title = title
self.filename = filename
self.extension = extension
self.content_type = content_type
@property
def title_or_filename(self):
return self.title or self.filename
class ReportImage(_ReportFile):
"""A rendered image of a report.
Parameters
----------
image : PIL.Image.Image | bytes | io.IOBase
PIL image object, the content of image as bytes, or a file-like
object that can read out the content of image.
title : str
Title of the image.
content_type : str
Content-type of the image, required if only the content of the image
rather than a PIL image object is specified.
anchor : str
Anchor name of this element.
If specified, this element can be referenced by internal links.
"""
element_type = ReportElementType.IMAGE
def __init__(self, image, title=None, content_type=None, anchor=None):
ext = None
if isinstance(image, Image.Image):
with io.BytesIO() as f:
image.save(f, format='PNG')
f.seek(0)
img = f.read()
content_type = 'image/png'
ext = '.png'
elif hasattr(image, 'read'):
img = image.read()
if not isinstance(img, bytes):
raise TypeError('Required to read bytes but got string.')
elif isinstance(image, bytes):
img = image
else:
raise TypeError('%r cannot be rendered as image.' % (image,))
if content_type is None:
raise ValueError('Content-type of the image is required.')
super(ReportImage, self).__init__(
img, title=title, extension=ext, content_type=content_type,
anchor=anchor
)
class ReportAttachment(_ReportFile):
"""A rendered attachment of a report.
Parameters
----------
data : bytes | io.IOBase
Bytes of the attachment, or a file-like object.
title : str
Title of the attachment.
content_type : str
Content-type of the attachment.
anchor : str
Anchor name of this element.
If specified, this element can be referenced by internal links.
"""
element_type = ReportElementType.ATTACHMENT
def __init__(self, data, filename, title=None, content_type=None,
anchor=None):
if hasattr(data, 'read'):
cnt = data.read()
if not isinstance(cnt, bytes):
raise TypeError('Required to read bytes but got string.')
elif isinstance(data, bytes):
cnt = data
else:
raise TypeError('%r cannot be rendered as attachment.' % (data,))
        if content_type is None:
            # guess_type returns a (type, encoding) tuple; only the type is needed here.
            content_type = mimetypes.guess_type(filename)[0]
            if content_type is None:
                content_type = ATTACHMENT_CONTENT_TYPE
super(ReportAttachment, self).__init__(
cnt, title=title, filename=filename, content_type=content_type,
anchor=anchor
)
class ReportDataFrame(ReportElement):
"""A pandas DataFrame of a report.
Parameters
----------
df : pandas.DataFrame
Pandas data frame.
title : str
Title of this data frame.
anchor : str
Anchor name of this element.
If specified, this element can be referenced by internal links.
"""
element_type = ReportElementType.DATA_FRAME
_repr_attributes = ('title',)
def __init__(self, df, title=None, anchor=None):
super(ReportDataFrame, self).__init__(anchor=anchor)
self.df = df
self.title = title
class ReportTreeTOCNode(object):
"""Node of the doc tree TOC."""
def __init__(self, title, anchor=None, children=None):
if children is None:
children = []
self.title = title
self.anchor = anchor
self.items = children # type: list[ReportTreeTOCNode]
def __len__(self):
return len(self.items)
def __iter__(self):
return iter(self.items)
def __getitem__(self, item):
return self.items[item]
def __contains__(self, item):
return item in self.items
@classmethod
def from_block(cls, block):
"""Construct a new TOC node from specified block.
Parameters
----------
block : ReportBlock
The report tree block.
Returns
-------
ReportTreeTOCNode
"""
def gather(parent, target):
if isinstance(parent, ReportBlock):
if parent.title:
items = []
for c in parent:
gather(c, items)
node = ReportTreeTOCNode(parent.title, parent.anchor,
items)
target.append(node)
else:
for c in parent:
gather(c, target)
children = []
for child in block:
gather(child, children)
return ReportTreeTOCNode(block.title, block.anchor, children)
class TreeReportRenderer(ReportRenderer):
"""Renderer that builds a doc tree for the report.
    A TreeReportRenderer builds a document tree for the specified report.
    It can be used as the basis for more complicated renderers, such as
    the ``HTMLReportRenderer``.
Parameters
----------
rrm : ReportResourcesManager
Report resources manager.
rrm_prefix : str
Resources path prefix for the report.
title : str
Title of the root report block.
exp_id : str
Identifier of the experiment.
        It is suggested to be a path-like string with "/" as the delimiter.
log_path : str
Path of the experiment log file.
"""
def __init__(self, rrm, rrm_prefix='', title=None, log_path=None,
exp_id=None):
super(TreeReportRenderer, self).__init__(rrm, rrm_prefix)
self.title = title
self.exp_id = exp_id
self.log_path = log_path
self.root = None # type: ReportRoot
self._block_stack = None
# get the anchor of toc
self.toc_anchor = self.get_unique_anchor(object(), name='toc')
def get_toc(self):
"""Get the table of contents.
Returns
-------
ReportTreeTOCNode
"""
return ReportTreeTOCNode.from_block(self.root)
def close(self):
pass
def begin_document(self):
self.root = ReportRoot(title=self.title, exp_id=self.exp_id)
self._block_stack = [self.root]
return self
def end_document(self):
self._block_stack.clear()
return self
def _new_element(self, cls, *args, **kwargs):
e = cls(*args, **kwargs)
self._block_stack[-1].add(e)
return e
def begin_block(self, title=None, report=None):
if title:
anchor = self.get_unique_anchor(report, title)
else:
anchor = None
block = self._new_element(ReportBlock, title=title, report=report,
anchor=anchor)
self._block_stack.append(block)
return self
def end_block(self):
if self._block_stack[-1] is self.root:
raise RuntimeError('Attempt to close the document block.')
self._block_stack.pop()
return self
def write_text(self, text):
self._new_element(ReportText, text)
return self
def write_html(self, html):
self._new_element(ReportHTML, html)
return self
def write_image(self, image, title=None, content_type=None):
self._new_element(ReportImage, image=image, title=title,
content_type=content_type)
return self
def write_attachment(self, data, filename, title=None, content_type=None):
self._new_element(
ReportAttachment, data=data, filename=filename,
title=title, content_type=content_type
)
return self
def write_data_frame(self, df, title=None):
self._new_element(ReportDataFrame, df=df, title=title)
return self
def write_figure(self, fig, title=None):
with io.BytesIO() as f:
fig.savefig(f, format='png', dpi=90)
f.seek(0)
return self.write_image(f, content_type='image/png')
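# Minimal end-to-end sketch (added for illustration; assumes a ReportResourcesManager
# instance ``rrm``, which is not defined in this excerpt):
#     renderer = TreeReportRenderer(rrm, title='Experiment Report')
#     renderer.begin_document()
#     renderer.begin_block(title='Results')
#     renderer.write_text('Training converged.')
#     renderer.end_block()
#     renderer.end_document()
#     toc = renderer.get_toc()  # ReportTreeTOCNode built from the rendered tree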
| 28.213018
| 78
| 0.591443
|
e7793b475a2df4e24c9c899af3f4038f36ee4ba0
| 1,262
|
py
|
Python
|
pycode/tinyflow/GetPredictResults.py
|
GIS-PuppetMaster/TENSILE
|
e19f973bb30fba69a23644389c82a4471ee5a241
|
[
"MIT"
] | null | null | null |
pycode/tinyflow/GetPredictResults.py
|
GIS-PuppetMaster/TENSILE
|
e19f973bb30fba69a23644389c82a4471ee5a241
|
[
"MIT"
] | null | null | null |
pycode/tinyflow/GetPredictResults.py
|
GIS-PuppetMaster/TENSILE
|
e19f973bb30fba69a23644389c82a4471ee5a241
|
[
"MIT"
] | 1
|
2020-09-27T07:27:47.000Z
|
2020-09-27T07:27:47.000Z
|
import os
from tensorflow.python.eager import executor
os.environ['CUDA_VISIBLE_DEVICES'] = f'{0}'
from VGG16_test_leo import VGG16
from Inceptionv3_test_leo import Inceptionv3
from Inceptionv4_test_leo import Inceptionv4
from ResNet50_test_leo import ResNet50
from DenseNet_test_leo import DenseNet121
import pickle as pkl
def get_predict_results(batch_size, num_step, log_path, job_id, model, **kwargs):
    # Build the model wrapper for the given job and collect its predicted (inferred) results.
    m = model(num_step=num_step, batch_size=batch_size, log_path=log_path, job_id=job_id)
    return m.get_predict_results(1000)
if __name__ == '__main__':
if not os.path.exists('./log/tempschedule/'):
os.makedirs('./log/tempschedule/')
log_path = f'./log/tempschedule/'
model_list = [VGG16, Inceptionv3, Inceptionv4, ResNet50, DenseNet121]
predict_results = [get_predict_results(2, 50, log_path, job_id, model_list[job_id]) for job_id in
range(len(model_list))]
res = {}
if not os.path.exists('../../res/inferred_shape/'):
os.makedirs('../../res/inferred_shape/')
for i, name in enumerate(['VGG16', 'Inceptionv3', 'Inceptionv4', 'ResNet50', 'DenseNet121']):
res[name] = predict_results[i]
with open(f'../../res/inferred_shape.pkl', 'wb') as f:
pkl.dump(res, f)
| 37.117647
| 101
| 0.706815
|
72ffd6093d22dfa993f523ed425bcec8d3320b02
| 18,936
|
py
|
Python
|
tools/gn/bootstrap/bootstrap.py
|
Wzzzx/chromium-crosswalk
|
768dde8efa71169f1c1113ca6ef322f1e8c9e7de
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2019-01-28T08:09:58.000Z
|
2021-11-15T15:32:10.000Z
|
tools/gn/bootstrap/bootstrap.py
|
Wzzzx/chromium-crosswalk
|
768dde8efa71169f1c1113ca6ef322f1e8c9e7de
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/gn/bootstrap/bootstrap.py
|
Wzzzx/chromium-crosswalk
|
768dde8efa71169f1c1113ca6ef322f1e8c9e7de
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 6
|
2020-09-23T08:56:12.000Z
|
2021-11-18T03:40:49.000Z
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file isn't officially supported by the Chromium project. It's maintained
# on a best-effort basis by volunteers, so some things may be broken from time
# to time. If you encounter errors, it's most often due to files in base that
# have been added or moved since somebody last tried this script. Generally
# such errors are easy to diagnose.
"""Bootstraps gn.
It is done by first building it manually in a temporary directory, then building
it with its own BUILD.gn to the final destination.
"""
import contextlib
import errno
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
BOOTSTRAP_DIR = os.path.dirname(os.path.abspath(__file__))
GN_ROOT = os.path.dirname(BOOTSTRAP_DIR)
SRC_ROOT = os.path.dirname(os.path.dirname(GN_ROOT))
is_linux = sys.platform.startswith('linux')
is_mac = sys.platform.startswith('darwin')
is_posix = is_linux or is_mac
def check_call(cmd, **kwargs):
logging.debug('Running: %s', ' '.join(cmd))
subprocess.check_call(cmd, cwd=GN_ROOT, **kwargs)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
@contextlib.contextmanager
def scoped_tempdir():
path = tempfile.mkdtemp()
try:
yield path
finally:
shutil.rmtree(path)
def run_build(tempdir, options):
if options.debug:
build_rel = os.path.join('out', 'Debug')
else:
build_rel = os.path.join('out', 'Release')
build_root = os.path.join(SRC_ROOT, build_rel)
print 'Building gn manually in a temporary directory for bootstrapping...'
build_gn_with_ninja_manually(tempdir, options)
temp_gn = os.path.join(tempdir, 'gn')
out_gn = os.path.join(build_root, 'gn')
if options.no_rebuild:
mkdir_p(build_root)
shutil.copy2(temp_gn, out_gn)
else:
print 'Building gn using itself to %s...' % build_rel
build_gn_with_gn(temp_gn, build_root, options)
if options.output:
# Preserve the executable permission bit.
shutil.copy2(out_gn, options.output)
def main(argv):
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option('-d', '--debug', action='store_true',
help='Do a debug build. Defaults to release build.')
parser.add_option('-o', '--output',
help='place output in PATH', metavar='PATH')
parser.add_option('-s', '--no-rebuild', action='store_true',
help='Do not rebuild GN with GN.')
parser.add_option('--no-clean', action='store_true',
                    help='Re-use build directory instead of using a new '
'temporary location each time')
parser.add_option('--gn-gen-args', help='Args to pass to gn gen --args')
parser.add_option('-v', '--verbose', action='store_true',
help='Log more details')
options, args = parser.parse_args(argv)
if args:
parser.error('Unrecognized command line arguments: %s.' % ', '.join(args))
logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR)
try:
if options.no_clean:
build_dir = os.path.join(SRC_ROOT, 'out_bootstrap')
if not os.path.exists(build_dir):
os.makedirs(build_dir)
return run_build(build_dir, options)
else:
with scoped_tempdir() as tempdir:
return run_build(tempdir, options)
except subprocess.CalledProcessError as e:
print >> sys.stderr, str(e)
return 1
return 0
def write_buildflag_header_manually(root_gen_dir, header, flags):
mkdir_p(os.path.join(root_gen_dir, os.path.dirname(header)))
with tempfile.NamedTemporaryFile() as f:
f.write('--flags')
for name,value in flags.items():
f.write(' ' + name + '=' + value)
f.flush()
check_call([
os.path.join(SRC_ROOT, 'build', 'write_buildflag_header.py'),
'--output', header,
'--gen-dir', root_gen_dir,
'--definitions', f.name,
])
def build_gn_with_ninja_manually(tempdir, options):
root_gen_dir = os.path.join(tempdir, 'gen')
mkdir_p(root_gen_dir)
write_buildflag_header_manually(root_gen_dir, 'base/allocator/features.h',
{'USE_EXPERIMENTAL_ALLOCATOR_SHIM': 'true' if is_linux else 'false'})
write_buildflag_header_manually(root_gen_dir, 'base/debug/debugging_flags.h',
{'ENABLE_PROFILING': 'false'})
if is_mac:
# //base/build_time.cc needs base/generated_build_date.h,
# and this file is only included for Mac builds.
mkdir_p(os.path.join(root_gen_dir, 'base'))
check_call([
os.path.join(SRC_ROOT, 'build', 'write_build_date_header.py'),
os.path.join(root_gen_dir, 'base', 'generated_build_date.h'),
'default'
])
write_ninja(os.path.join(tempdir, 'build.ninja'), root_gen_dir, options)
cmd = ['ninja', '-C', tempdir]
if options.verbose:
cmd.append('-v')
cmd.append('gn')
check_call(cmd)
def write_ninja(path, root_gen_dir, options):
cc = os.environ.get('CC', '')
cxx = os.environ.get('CXX', '')
cflags = os.environ.get('CFLAGS', '').split()
cflags_cc = os.environ.get('CXXFLAGS', '').split()
ld = os.environ.get('LD', cxx)
ldflags = os.environ.get('LDFLAGS', '').split()
include_dirs = [root_gen_dir, SRC_ROOT]
libs = []
# //base/allocator/allocator_extension.cc needs this macro defined,
# otherwise there would be link errors.
cflags.extend(['-DNO_TCMALLOC'])
if is_posix:
if options.debug:
cflags.extend(['-O0', '-g'])
else:
cflags.extend(['-O2', '-g0'])
cflags.extend([
'-D_FILE_OFFSET_BITS=64',
'-pthread',
'-pipe',
'-fno-exceptions'
])
cflags_cc.extend(['-std=c++11', '-Wno-c++11-narrowing'])
static_libraries = {
'base': {'sources': [], 'tool': 'cxx', 'include_dirs': []},
'dynamic_annotations': {'sources': [], 'tool': 'cc', 'include_dirs': []},
'gn': {'sources': [], 'tool': 'cxx', 'include_dirs': []},
}
for name in os.listdir(GN_ROOT):
if not name.endswith('.cc'):
continue
if name.endswith('_unittest.cc'):
continue
if name == 'run_all_unittests.cc':
continue
full_path = os.path.join(GN_ROOT, name)
static_libraries['gn']['sources'].append(
os.path.relpath(full_path, SRC_ROOT))
static_libraries['dynamic_annotations']['sources'].extend([
'base/third_party/dynamic_annotations/dynamic_annotations.c',
'base/third_party/superfasthash/superfasthash.c',
])
static_libraries['base']['sources'].extend([
'base/allocator/allocator_check.cc',
'base/allocator/allocator_extension.cc',
'base/at_exit.cc',
'base/base_paths.cc',
'base/base_switches.cc',
'base/callback_internal.cc',
'base/command_line.cc',
'base/debug/alias.cc',
'base/debug/stack_trace.cc',
'base/debug/task_annotator.cc',
'base/environment.cc',
'base/files/file.cc',
'base/files/file_enumerator.cc',
'base/files/file_path.cc',
'base/files/file_path_constants.cc',
'base/files/file_tracing.cc',
'base/files/file_util.cc',
'base/files/important_file_writer.cc',
'base/files/memory_mapped_file.cc',
'base/files/scoped_file.cc',
'base/hash.cc',
'base/json/json_parser.cc',
'base/json/json_reader.cc',
'base/json/json_string_value_serializer.cc',
'base/json/json_writer.cc',
'base/json/string_escape.cc',
'base/lazy_instance.cc',
'base/location.cc',
'base/logging.cc',
'base/md5.cc',
'base/memory/ref_counted.cc',
'base/memory/ref_counted_memory.cc',
'base/memory/singleton.cc',
'base/memory/weak_ptr.cc',
'base/message_loop/incoming_task_queue.cc',
'base/message_loop/message_loop.cc',
'base/message_loop/message_loop_task_runner.cc',
'base/message_loop/message_pump.cc',
'base/message_loop/message_pump_default.cc',
'base/metrics/bucket_ranges.cc',
'base/metrics/histogram.cc',
'base/metrics/histogram_base.cc',
'base/metrics/histogram_samples.cc',
'base/metrics/metrics_hashes.cc',
'base/metrics/persistent_histogram_allocator.cc',
'base/metrics/persistent_memory_allocator.cc',
'base/metrics/persistent_sample_map.cc',
'base/metrics/sample_map.cc',
'base/metrics/sample_vector.cc',
'base/metrics/sparse_histogram.cc',
'base/metrics/statistics_recorder.cc',
'base/path_service.cc',
'base/pending_task.cc',
'base/pickle.cc',
'base/process/kill.cc',
'base/process/process_iterator.cc',
'base/process/process_metrics.cc',
'base/profiler/scoped_profile.cc',
'base/profiler/scoped_tracker.cc',
'base/profiler/tracked_time.cc',
'base/run_loop.cc',
'base/sequence_checker_impl.cc',
'base/sequenced_task_runner.cc',
'base/sha1.cc',
'base/strings/pattern.cc',
'base/strings/string16.cc',
'base/strings/string_number_conversions.cc',
'base/strings/string_piece.cc',
'base/strings/string_split.cc',
'base/strings/string_util.cc',
'base/strings/string_util_constants.cc',
'base/strings/stringprintf.cc',
'base/strings/utf_string_conversion_utils.cc',
'base/strings/utf_string_conversions.cc',
'base/synchronization/cancellation_flag.cc',
'base/synchronization/lock.cc',
'base/sys_info.cc',
'base/task_runner.cc',
'base/third_party/dmg_fp/dtoa_wrapper.cc',
'base/third_party/dmg_fp/g_fmt.cc',
'base/third_party/icu/icu_utf.cc',
'base/third_party/nspr/prtime.cc',
'base/threading/non_thread_safe_impl.cc',
'base/threading/post_task_and_reply_impl.cc',
'base/threading/sequenced_task_runner_handle.cc',
'base/threading/sequenced_worker_pool.cc',
'base/threading/simple_thread.cc',
'base/threading/thread.cc',
'base/threading/thread_checker_impl.cc',
'base/threading/thread_collision_warner.cc',
'base/threading/thread_id_name_manager.cc',
'base/threading/thread_local_storage.cc',
'base/threading/thread_restrictions.cc',
'base/threading/thread_task_runner_handle.cc',
'base/threading/worker_pool.cc',
'base/time/time.cc',
'base/timer/elapsed_timer.cc',
'base/timer/timer.cc',
'base/trace_event/heap_profiler_allocation_context.cc',
'base/trace_event/heap_profiler_allocation_context_tracker.cc',
'base/trace_event/heap_profiler_allocation_register.cc',
'base/trace_event/heap_profiler_heap_dump_writer.cc',
'base/trace_event/heap_profiler_stack_frame_deduplicator.cc',
'base/trace_event/heap_profiler_type_name_deduplicator.cc',
'base/trace_event/memory_allocator_dump.cc',
'base/trace_event/memory_allocator_dump_guid.cc',
'base/trace_event/memory_dump_manager.cc',
'base/trace_event/memory_dump_request_args.cc',
'base/trace_event/memory_dump_session_state.cc',
'base/trace_event/memory_infra_background_whitelist.cc',
'base/trace_event/process_memory_dump.cc',
'base/trace_event/process_memory_maps.cc',
'base/trace_event/process_memory_totals.cc',
'base/trace_event/trace_buffer.cc',
'base/trace_event/trace_config.cc',
'base/trace_event/trace_event_argument.cc',
'base/trace_event/trace_event_impl.cc',
'base/trace_event/trace_event_memory_overhead.cc',
'base/trace_event/trace_event_synthetic_delay.cc',
'base/trace_event/trace_log.cc',
'base/trace_event/trace_log_constants.cc',
'base/trace_event/trace_sampling_thread.cc',
'base/trace_event/tracing_agent.cc',
'base/tracked_objects.cc',
'base/tracking_info.cc',
'base/values.cc',
'base/vlog.cc',
])
if is_posix:
static_libraries['base']['sources'].extend([
'base/base_paths_posix.cc',
'base/debug/debugger_posix.cc',
'base/debug/stack_trace_posix.cc',
'base/files/file_enumerator_posix.cc',
'base/files/file_posix.cc',
'base/files/file_util_posix.cc',
'base/files/memory_mapped_file_posix.cc',
'base/message_loop/message_pump_libevent.cc',
'base/posix/file_descriptor_shuffle.cc',
'base/posix/safe_strerror.cc',
'base/process/kill_posix.cc',
'base/process/process_handle_posix.cc',
'base/process/process_metrics_posix.cc',
'base/process/process_posix.cc',
'base/synchronization/condition_variable_posix.cc',
'base/synchronization/lock_impl_posix.cc',
'base/synchronization/read_write_lock_posix.cc',
'base/synchronization/waitable_event_posix.cc',
'base/sys_info_posix.cc',
'base/threading/platform_thread_internal_posix.cc',
'base/threading/platform_thread_posix.cc',
'base/threading/thread_local_posix.cc',
'base/threading/thread_local_storage_posix.cc',
'base/threading/worker_pool_posix.cc',
'base/time/time_posix.cc',
'base/trace_event/heap_profiler_allocation_register_posix.cc',
])
static_libraries['libevent'] = {
'sources': [
'base/third_party/libevent/buffer.c',
'base/third_party/libevent/evbuffer.c',
'base/third_party/libevent/evdns.c',
'base/third_party/libevent/event.c',
'base/third_party/libevent/event_tagging.c',
'base/third_party/libevent/evrpc.c',
'base/third_party/libevent/evutil.c',
'base/third_party/libevent/http.c',
'base/third_party/libevent/log.c',
'base/third_party/libevent/poll.c',
'base/third_party/libevent/select.c',
'base/third_party/libevent/signal.c',
'base/third_party/libevent/strlcpy.c',
],
'tool': 'cc',
'include_dirs': [],
'cflags': cflags + ['-DHAVE_CONFIG_H'],
}
if is_linux:
libs.extend(['-lrt'])
ldflags.extend(['-pthread'])
static_libraries['xdg_user_dirs'] = {
'sources': [
'base/third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
],
'tool': 'cxx',
}
static_libraries['base']['sources'].extend([
'base/allocator/allocator_shim.cc',
'base/allocator/allocator_shim_default_dispatch_to_glibc.cc',
'base/memory/shared_memory_posix.cc',
'base/nix/xdg_util.cc',
'base/process/internal_linux.cc',
'base/process/process_handle_linux.cc',
'base/process/process_iterator_linux.cc',
'base/process/process_linux.cc',
'base/process/process_metrics_linux.cc',
'base/strings/sys_string_conversions_posix.cc',
'base/sys_info_linux.cc',
'base/threading/platform_thread_linux.cc',
'base/trace_event/malloc_dump_provider.cc',
])
static_libraries['libevent']['include_dirs'].extend([
os.path.join(SRC_ROOT, 'base', 'third_party', 'libevent', 'linux')
])
static_libraries['libevent']['sources'].extend([
'base/third_party/libevent/epoll.c',
])
if is_mac:
static_libraries['base']['sources'].extend([
'base/base_paths_mac.mm',
'base/build_time.cc',
'base/rand_util.cc',
'base/rand_util_posix.cc',
'base/files/file_util_mac.mm',
'base/mac/bundle_locations.mm',
'base/mac/call_with_eh_frame.cc',
'base/mac/call_with_eh_frame_asm.S',
'base/mac/foundation_util.mm',
'base/mac/mach_logging.cc',
'base/mac/scoped_mach_port.cc',
'base/mac/scoped_mach_vm.cc',
'base/mac/scoped_nsautorelease_pool.mm',
'base/memory/shared_memory_handle_mac.cc',
'base/memory/shared_memory_mac.cc',
'base/message_loop/message_pump_mac.mm',
'base/metrics/field_trial.cc',
'base/process/process_handle_mac.cc',
'base/process/process_iterator_mac.cc',
'base/process/process_metrics_mac.cc',
'base/strings/sys_string_conversions_mac.mm',
'base/time/time_mac.cc',
'base/threading/platform_thread_mac.mm',
'base/trace_event/malloc_dump_provider.cc',
])
static_libraries['libevent']['include_dirs'].extend([
os.path.join(SRC_ROOT, 'base', 'third_party', 'libevent', 'mac')
])
static_libraries['libevent']['sources'].extend([
'base/third_party/libevent/kqueue.c',
])
if is_mac:
template_filename = 'build_mac.ninja.template'
else:
template_filename = 'build.ninja.template'
with open(os.path.join(GN_ROOT, 'bootstrap', template_filename)) as f:
ninja_template = f.read()
def src_to_obj(path):
return '%s' % os.path.splitext(path)[0] + '.o'
ninja_lines = []
for library, settings in static_libraries.iteritems():
for src_file in settings['sources']:
ninja_lines.extend([
'build %s: %s %s' % (src_to_obj(src_file),
settings['tool'],
os.path.join(SRC_ROOT, src_file)),
' includes = %s' % ' '.join(
['-I' + dirname for dirname in
include_dirs + settings.get('include_dirs', [])]),
' cflags = %s' % ' '.join(cflags + settings.get('cflags', [])),
' cflags_cc = %s' %
' '.join(cflags_cc + settings.get('cflags_cc', [])),
])
if cc:
ninja_lines.append(' cc = %s' % cc)
if cxx:
ninja_lines.append(' cxx = %s' % cxx)
ninja_lines.append('build %s.a: alink_thin %s' % (
library,
' '.join([src_to_obj(src_file) for src_file in settings['sources']])))
if is_mac:
libs.extend([
'-framework', 'AppKit',
'-framework', 'CoreFoundation',
'-framework', 'Foundation',
'-framework', 'Security',
]);
ninja_lines.extend([
'build gn: link %s' % (
' '.join(['%s.a' % library for library in static_libraries])),
' ldflags = %s' % ' '.join(ldflags),
' libs = %s' % ' '.join(libs),
])
if ld:
ninja_lines.append(' ld = %s' % ld)
else:
ninja_lines.append(' ld = $ldxx')
ninja_lines.append('') # Make sure the file ends with a newline.
with open(path, 'w') as f:
f.write(ninja_template + '\n'.join(ninja_lines))
def build_gn_with_gn(temp_gn, build_dir, options):
gn_gen_args = options.gn_gen_args or ''
if not options.debug:
gn_gen_args += ' is_debug=false'
cmd = [temp_gn, 'gen', build_dir, '--args=%s' % gn_gen_args]
check_call(cmd)
cmd = ['ninja', '-C', build_dir]
if options.verbose:
cmd.append('-v')
cmd.append('gn')
check_call(cmd)
if not options.debug:
check_call(['strip', os.path.join(build_dir, 'gn')])
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 35.328358
| 80
| 0.654943
|
a9ff40e8441994dda413e3bde545d9838d1035cb
| 59,831
|
py
|
Python
|
efinance/stock/getter.py
|
vensentzhou/efinance
|
f76f77155e90b8738f7ed3ccec577837c60e075d
|
[
"MIT"
] | null | null | null |
efinance/stock/getter.py
|
vensentzhou/efinance
|
f76f77155e90b8738f7ed3ccec577837c60e075d
|
[
"MIT"
] | null | null | null |
efinance/stock/getter.py
|
vensentzhou/efinance
|
f76f77155e90b8738f7ed3ccec577837c60e075d
|
[
"MIT"
] | null | null | null |
import json
import calendar
import numpy as np
from ..utils import (search_quote, to_type)
from datetime import datetime, timedelta
from ..utils import process_dataframe_and_series
import rich
from jsonpath import jsonpath
from retry import retry
import pandas as pd
import requests
import multitasking
import signal
from tqdm import tqdm
from typing import (Dict,
List,
Union)
from ..shared import session
from ..common import get_quote_history as get_quote_history_for_stock
from ..common import get_history_bill as get_history_bill_for_stock
from ..common import get_today_bill as get_today_bill_for_stock
from ..common import get_realtime_quotes_by_fs
from ..utils import (to_numeric,
get_quote_id)
from .config import EASTMONEY_STOCK_DAILY_BILL_BOARD_FIELDS, EASTMONEY_STOCK_BASE_INFO_FIELDS
from ..common.config import (
FS_DICT,
MARKET_NUMBER_DICT,
EASTMONEY_REQUEST_HEADERS,
EASTMONEY_QUOTE_FIELDS
)
signal.signal(signal.SIGINT, multitasking.killall)
@to_numeric
def get_base_info_single(stock_code: str) -> pd.Series:
"""
获取单股票基本信息
Parameters
----------
stock_code : str
股票代码
Returns
-------
Series
单只股票基本信息
"""
fields = ",".join(EASTMONEY_STOCK_BASE_INFO_FIELDS.keys())
secid = get_quote_id(stock_code)
if not secid:
return pd.Series(index=EASTMONEY_STOCK_BASE_INFO_FIELDS.values(), dtype='object')
params = (
('ut', 'fa5fd1943c7b386f172d6893dbfba10b'),
('invt', '2'),
('fltt', '2'),
('fields', fields),
('secid', secid),
)
url = 'http://push2.eastmoney.com/api/qt/stock/get'
json_response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
s = pd.Series(json_response['data']).rename(
index=EASTMONEY_STOCK_BASE_INFO_FIELDS)
return s[EASTMONEY_STOCK_BASE_INFO_FIELDS.values()]
def get_base_info_muliti(stock_codes: List[str]) -> pd.DataFrame:
"""
获取股票多只基本信息
Parameters
----------
stock_codes : List[str]
股票代码列表
Returns
-------
DataFrame
多只股票基本信息
"""
@multitasking.task
@retry(tries=3, delay=1)
def start(stock_code: str):
s = get_base_info_single(stock_code)
dfs.append(s)
pbar.update()
pbar.set_description(f'Processing => {stock_code}')
dfs: List[pd.DataFrame] = []
pbar = tqdm(total=len(stock_codes))
for stock_code in stock_codes:
start(stock_code)
multitasking.wait_for_tasks()
df = pd.DataFrame(dfs)
df = df.dropna(subset=['股票代码'])
return df
@to_numeric
def get_base_info(stock_codes: Union[str, List[str]]) -> Union[pd.Series, pd.DataFrame]:
"""
Parameters
----------
stock_codes : Union[str, List[str]]
股票代码或股票代码构成的列表
Returns
-------
Union[Series, DataFrame]
- ``Series`` : 包含单只股票基本信息(当 ``stock_codes`` 是字符串时)
- ``DataFrane`` : 包含多只股票基本信息(当 ``stock_codes`` 是字符串列表时)
Raises
------
TypeError
当 ``stock_codes`` 类型不符合要求时
Examples
--------
>>> import efinance as ef
    >>> # Fetch information for a single stock
>>> ef.stock.get_base_info('600519')
股票代码 600519
股票名称 贵州茅台
市盈率(动) 39.38
市净率 12.54
所处行业 酿酒行业
总市值 2198082348462.0
流通市值 2198082348462.0
板块编号 BK0477
ROE 8.29
净利率 54.1678
净利润 13954462085.610001
毛利率 91.6763
dtype: object
    >>> # Fetch information for multiple stocks
>>> ef.stock.get_base_info(['600519','300715'])
股票代码 股票名称 市盈率(动) 市净率 所处行业 总市值 流通市值 板块编号 ROE 净利率 净利润 毛利率
0 300715 凯伦股份 42.29 3.12 水泥建材 9.160864e+09 6.397043e+09 BK0424 3.97 12.1659 5.415488e+07 32.8765
1 600519 贵州茅台 39.38 12.54 酿酒行业 2.198082e+12 2.198082e+12 BK0477 8.29 54.1678 1.395446e+10 91.6763
"""
if isinstance(stock_codes, str):
return get_base_info_single(stock_codes)
elif hasattr(stock_codes, '__iter__'):
return get_base_info_muliti(stock_codes)
raise TypeError(f'所给的 {stock_codes} 不符合参数要求')
def get_quote_history(stock_codes: Union[str, List[str]],
beg: str = '19000101',
end: str = '20500101',
klt: int = 101,
fqt: int = 1,
**kwargs) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""
获取股票的 K 线数据
Parameters
----------
stock_codes : Union[str,List[str]]
股票代码、名称 或者 股票代码、名称构成的列表
beg : str, optional
开始日期,默认为 ``'19000101'`` ,表示 1900年1月1日
end : str, optional
结束日期,默认为 ``'20500101'`` ,表示 2050年1月1日
klt : int, optional
行情之间的时间间隔,默认为 ``101`` ,可选示例如下
- ``1`` : 分钟
- ``5`` : 5 分钟
- ``15`` : 15 分钟
- ``30`` : 30 分钟
- ``60`` : 60 分钟
- ``101`` : 日
- ``102`` : 周
- ``103`` : 月
fqt : int, optional
复权方式,默认为 ``1`` ,可选示例如下
- ``0`` : 不复权
- ``1`` : 前复权
- ``2`` : 后复权
Returns
-------
Union[DataFrame, Dict[str, DataFrame]]
股票的 K 线数据
- ``DataFrame`` : 当 ``stock_codes`` 是 ``str`` 时
- ``Dict[str, DataFrame]`` : 当 ``stock_codes`` 是 ``List[str]`` 时
Examples
--------
>>> import efinance as ef
    >>> # Fetch daily K-line data for a single stock
>>> ef.stock.get_quote_history('600519')
股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率
0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83
1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13
2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45
3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72
4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25
... ... ... ... ... ... ... ... ... ... ... ... ... ...
4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38
4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79
4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69
4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68
4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51
    >>> # Fetch historical quotes for multiple stocks
>>> stock_df = ef.stock.get_quote_history(['600519','300750'])
>>> type(stock_df)
<class 'dict'>
>>> stock_df.keys()
dict_keys(['300750', '600519'])
>>> stock_df['600519']
股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率
0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83
1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13
2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45
3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72
4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25
... ... ... ... ... ... ... ... ... ... ... ... ... ...
4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38
4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79
4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69
4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68
4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51
"""
df = get_quote_history_for_stock(
stock_codes,
beg=beg,
end=end,
klt=klt,
fqt=fqt
)
if isinstance(df, pd.DataFrame):
df.rename(columns={'代码': '股票代码',
'名称': '股票名称'
},
inplace=True)
elif isinstance(df, dict):
for stock_code in df.keys():
df[stock_code].rename(columns={'代码': '股票代码',
'名称': '股票名称'
},
inplace=True)
    # NOTE extension hook: when this keyword is set, return a DataFrame instead of a dict
if kwargs.get('return_df'):
df: pd.DataFrame = pd.concat(df, axis=0, ignore_index=True)
return df
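# Illustrative use of the ``return_df`` extension noted above (added for clarity; the
# call mirrors this module's function signature):
#     df = get_quote_history(['600519', '300750'], return_df=True)  # one concatenated DataFrame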
@process_dataframe_and_series(remove_columns_and_indexes=['市场编号'])
@to_numeric
def get_realtime_quotes(fs: Union[str, List[str]] = None) -> pd.DataFrame:
"""
获取单个或者多个市场行情的最新状况
Parameters
----------
fs : Union[str, List[str]], optional
行情名称或者多个行情名列表 可选值及示例如下
- ``None`` 沪深京A股市场行情
- ``'沪深A股'`` 沪深A股市场行情
- ``'沪A'`` 沪市A股市场行情
- ``'深A'`` 深市A股市场行情
- ``北A`` 北证A股市场行情
- ``'可转债'`` 沪深可转债市场行情
- ``'期货'`` 期货市场行情
- ``'创业板'`` 创业板市场行情
- ``'美股'`` 美股市场行情
- ``'港股'`` 港股市场行情
- ``'中概股'`` 中国概念股市场行情
- ``'新股'`` 沪深新股市场行情
- ``'科创板'`` 科创板市场行情
- ``'沪股通'`` 沪股通市场行情
- ``'深股通'`` 深股通市场行情
- ``'行业板块'`` 行业板块市场行情
- ``'概念板块'`` 概念板块市场行情
- ``'沪深系列指数'`` 沪深系列指数市场行情
- ``'上证系列指数'`` 上证系列指数市场行情
- ``'深证系列指数'`` 深证系列指数市场行情
- ``'ETF'`` ETF 基金市场行情
- ``'LOF'`` LOF 基金市场行情
Returns
-------
DataFrame
单个或者多个市场行情的最新状况
Raises
------
KeyError
当参数 ``fs`` 中含有不正确的行情类型时引发错误
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_realtime_quotes()
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 688787 N海天 277.59 139.48 172.39 139.25 171.66 102.54 85.62 - 78.93 74519 1110318832.0 36.94 5969744000 1213908667 1.688787 沪A
1 301045 N天禄 149.34 39.42 48.95 39.2 48.95 23.61 66.66 - 37.81 163061 683878656.0 15.81 4066344240 964237089 0.301045 深A
2 300532 今天国际 20.04 12.16 12.16 10.69 10.69 2.03 8.85 3.02 -22.72 144795 171535181.0 10.13 3322510580 1989333440 0.300532 深A
3 300600 国瑞科技 20.02 13.19 13.19 11.11 11.41 2.2 18.61 2.82 218.75 423779 541164432.0 10.99 3915421427 3003665117 0.300600 深A
4 300985 致远新能 20.01 47.08 47.08 36.8 39.4 7.85 66.65 2.17 58.37 210697 897370992.0 39.23 6277336472 1488300116 0.300985 深A
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
4598 603186 华正新材 -10.0 43.27 44.09 43.27 43.99 -4.81 1.98 0.48 25.24 27697 120486294.0 48.08 6146300650 6063519472 1.603186 沪A
4599 688185 康希诺-U -10.11 476.4 534.94 460.13 530.0 -53.6 6.02 2.74 -2088.07 40239 1960540832.0 530.0 117885131884 31831479215 1.688185 沪A
4600 688148 芳源股份 -10.57 31.3 34.39 31.3 33.9 -3.7 26.07 0.56 220.01 188415 620632512.0 35.0 15923562000 2261706043 1.688148 沪A
4601 300034 钢研高纳 -10.96 43.12 46.81 42.88 46.5 -5.31 7.45 1.77 59.49 323226 1441101824.0 48.43 20959281094 18706911861 0.300034 深A
4602 300712 永福股份 -13.71 96.9 110.94 95.4 109.0 -15.4 6.96 1.26 511.21 126705 1265152928.0 112.3 17645877600 17645877600 0.300712 深A
>>> ef.stock.get_realtime_quotes(['创业板','港股'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 00859 中昌国际控股 49.02 0.38 0.38 0.26 0.26 0.125 0.08 86.85 -2.83 938000 262860.0 0.255 427510287 427510287 128.00859 None
1 01058 粤海制革 41.05 1.34 1.51 0.9 0.93 0.39 8.34 1.61 249.89 44878000 57662440.0 0.95 720945460 720945460 128.01058 None
2 00713 世界(集团) 27.94 0.87 0.9 0.68 0.68 0.19 1.22 33.28 3.64 9372000 7585400.0 0.68 670785156 670785156 128.00713 None
3 08668 瀛海集团 24.65 0.177 0.179 0.145 0.145 0.035 0.0 10.0 -9.78 20000 3240.0 0.142 212400000 212400000 128.08668 None
4 08413 亚洲杂货 24.44 0.28 0.28 0.25 0.25 0.055 0.01 3.48 -20.76 160000 41300.0 0.225 325360000 325360000 128.08413 None
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
5632 08429 冰雪集团 -16.75 0.174 0.2 0.166 0.2 -0.035 2.48 3.52 -21.58 11895000 2074645.0 0.209 83520000 83520000 128.08429 None
5633 00524 长城天下 -17.56 0.108 0.118 0.103 0.118 -0.023 0.45 15.43 -6.55 5961200 649171.0 0.131 141787800 141787800 128.00524 None
5634 08377 申酉控股 -17.71 0.395 0.46 0.39 0.46 -0.085 0.07 8.06 -5.07 290000 123200.0 0.48 161611035 161611035 128.08377 None
5635 00108 国锐地产 -19.01 1.15 1.42 1.15 1.42 -0.27 0.07 0.78 23.94 2376000 3012080.0 1.42 3679280084 3679280084 128.00108 None
5636 08237 华星控股 -25.0 0.024 0.031 0.023 0.031 -0.008 0.43 8.74 -2.01 15008000 364188.0 0.032 83760000 83760000 128.08237 None
>>> ef.stock.get_realtime_quotes(['ETF'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 513050 中概互联网ETF 4.49 1.444 1.455 1.433 1.452 0.062 6.71 0.92 - 12961671 1870845984.0 1.382 27895816917 27895816917 1.513050 沪A
1 513360 教育ETF 4.38 0.5 0.502 0.486 0.487 0.021 16.89 1.7 - 1104254 54634387.0 0.479 326856952 326856952 1.513360 沪A
2 159766 旅游ETF 3.84 0.974 0.988 0.95 0.95 0.036 14.46 1.97 - 463730 45254947.0 0.938 312304295 312304295 0.159766 深A
3 159865 养殖ETF 3.8 0.819 0.828 0.785 0.791 0.03 12.13 0.89 - 1405871 114254714.0 0.789 949594189 949594189 0.159865 深A
4 516670 畜牧养殖ETF 3.76 0.856 0.864 0.825 0.835 0.031 24.08 0.98 - 292027 24924513.0 0.825 103803953 103803953 1.516670 沪A
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
549 513060 恒生医疗ETF -4.12 0.861 0.905 0.86 0.902 -0.037 47.96 1.57 - 1620502 141454355.0 0.898 290926128 290926128 1.513060 沪A
550 515220 煤炭ETF -4.46 2.226 2.394 2.194 2.378 -0.104 14.39 0.98 - 2178176 487720560.0 2.330 3369247992 3369247992 1.515220 沪A
551 513000 日经225ETF易方达 -4.49 1.212 1.269 1.21 1.269 -0.057 5.02 2.49 - 25819 3152848.0 1.269 62310617 62310617 1.513000 沪A
552 513880 日经225ETF -4.59 1.163 1.224 1.162 1.217 -0.056 16.93 0.94 - 71058 8336846.0 1.219 48811110 48811110 1.513880 沪A
553 513520 日经ETF -4.76 1.2 1.217 1.196 1.217 -0.06 27.7 1.79 - 146520 17645828.0 1.260 63464640 63464640 1.513520 沪A
Notes
-----
    Whether the instrument is a stock, convertible bond, future or fund, the first column is always named ``股票代码``.
"""
fs_list: List[str] = []
if fs is None:
fs_list.append(FS_DICT['stock'])
if isinstance(fs, str):
fs = [fs]
if isinstance(fs, list):
for f in fs:
if not FS_DICT.get(f):
raise KeyError(f'指定的行情参数 `{fs}` 不正确')
fs_list.append(FS_DICT[f])
    # Fall back to Shanghai/Shenzhen A-share quotes when an empty list is given
if not fs_list:
fs_list.append(FS_DICT['stock'])
fs_str = ','.join(fs_list)
df = get_realtime_quotes_by_fs(fs_str)
df.rename(columns={'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_history_bill(stock_code: str) -> pd.DataFrame:
"""
获取单只股票历史单子流入流出数据
Parameters
----------
stock_code : str
股票代码
Returns
-------
DataFrame
沪深市场单只股票历史单子流入流出数据
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_history_bill('600519')
股票名称 股票代码 日期 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入 主力净流入占比 小单流入净占比 中单流入净占比 大单流入净占比 超大单流入净占比 收盘价 涨跌幅
0 贵州茅台 600519 2021-03-04 -3.670272e+06 -2282056.0 5.952143e+06 1.461528e+09 -1.465199e+09 -0.03 -0.02 0.04 10.99 -11.02 2013.71 -5.05
1 贵州茅台 600519 2021-03-05 -1.514880e+07 -1319066.0 1.646793e+07 -2.528896e+07 1.014016e+07 -0.12 -0.01 0.13 -0.19 0.08 2040.82 1.35
2 贵州茅台 600519 2021-03-08 -8.001702e+08 -877074.0 8.010473e+08 5.670671e+08 -1.367237e+09 -6.29 -0.01 6.30 4.46 -10.75 1940.71 -4.91
3 贵州茅台 600519 2021-03-09 -2.237770e+08 -6391767.0 2.301686e+08 -1.795013e+08 -4.427571e+07 -1.39 -0.04 1.43 -1.11 -0.27 1917.70 -1.19
4 贵州茅台 600519 2021-03-10 -2.044173e+08 -1551798.0 2.059690e+08 -2.378506e+08 3.343331e+07 -2.02 -0.02 2.03 -2.35 0.33 1950.72 1.72
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
97 贵州茅台 600519 2021-07-26 -1.564233e+09 13142211.0 1.551091e+09 -1.270400e+08 -1.437193e+09 -8.74 0.07 8.67 -0.71 -8.03 1804.11 -5.05
98 贵州茅台 600519 2021-07-27 -7.803296e+08 -10424715.0 7.907544e+08 6.725104e+07 -8.475807e+08 -5.12 -0.07 5.19 0.44 -5.56 1712.89 -5.06
99 贵州茅台 600519 2021-07-28 3.997645e+08 2603511.0 -4.023677e+08 2.315648e+08 1.681997e+08 2.70 0.02 -2.72 1.57 1.14 1768.90 3.27
100 贵州茅台 600519 2021-07-29 -9.209842e+08 -2312235.0 9.232964e+08 -3.959741e+08 -5.250101e+08 -8.15 -0.02 8.17 -3.50 -4.65 1749.79 -1.08
101 贵州茅台 600519 2021-07-30 -1.524740e+09 -6020099.0 1.530761e+09 1.147248e+08 -1.639465e+09 -11.63 -0.05 11.68 0.88 -12.51 1678.99 -4.05
"""
df = get_history_bill_for_stock(stock_code)
df.rename(columns={
'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_today_bill(stock_code: str) -> pd.DataFrame:
"""
获取单只股票最新交易日的日内分钟级单子流入流出数据
Parameters
----------
stock_code : str
股票代码
Returns
-------
DataFrame
单只股票最新交易日的日内分钟级单子流入流出数据
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_today_bill('600519')
股票代码 时间 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入
0 600519 2021-07-29 09:31 -3261705.0 -389320.0 3651025.0 -12529658.0 9267953.0
1 600519 2021-07-29 09:32 6437999.0 -606994.0 -5831006.0 -42615994.0 49053993.0
2 600519 2021-07-29 09:33 13179707.0 -606994.0 -12572715.0 -85059118.0 98238825.0
3 600519 2021-07-29 09:34 15385244.0 -970615.0 -14414632.0 -86865209.0 102250453.0
4 600519 2021-07-29 09:35 7853716.0 -970615.0 -6883104.0 -75692436.0 83546152.0
.. ... ... ... ... ... ... ...
235 600519 2021-07-29 14:56 -918956019.0 -1299630.0 920255661.0 -397127393.0 -521828626.0
236 600519 2021-07-29 14:57 -920977761.0 -2319213.0 923296987.0 -397014702.0 -523963059.0
237 600519 2021-07-29 14:58 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
238 600519 2021-07-29 14:59 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
239 600519 2021-07-29 15:00 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
"""
df = get_today_bill_for_stock(stock_code)
df.rename(columns={
'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_latest_quote(stock_codes: List[str]) -> pd.DataFrame:
"""
获取沪深市场多只股票的实时涨幅情况
Parameters
----------
stock_codes : List[str]
多只股票代码列表
Returns
-------
DataFrame
沪深市场、港股、美股多只股票的实时涨幅情况
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_latest_quote(['600519','300750'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 市场类型
0 600519 贵州茅台 0.59 1700.04 1713.0 1679.0 1690.0 10.04 0.30 0.72 43.31 37905 6.418413e+09 1690.0 2135586507912 2135586507912 沪A
1 300750 宁德时代 0.01 502.05 529.9 480.0 480.0 0.05 1.37 1.75 149.57 277258 1.408545e+10 502.0 1169278366994 1019031580505 深A
Notes
-----
    When you need real-time changes for many Shanghai/Shenzhen A-shares at once, prefer ``efinance.stock.get_realtime_quotes``.
"""
if isinstance(stock_codes, str):
stock_codes = [stock_codes]
secids: List[str] = [get_quote_id(stock_code)
for stock_code in stock_codes]
columns = EASTMONEY_QUOTE_FIELDS
fields = ",".join(columns.keys())
params = (
('OSVersion', '14.3'),
('appVersion', '6.3.8'),
('fields', fields),
('fltt', '2'),
('plat', 'Iphone'),
('product', 'EFund'),
('secids', ",".join(secids)),
('serverVersion', '6.3.6'),
('version', '6.3.8'),
)
url = 'https://push2.eastmoney.com/api/qt/ulist.np/get'
json_response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
rows = jsonpath(json_response, '$..diff[:]')
if rows is None:
        return pd.DataFrame(columns=columns.values()).rename(columns={
            '市场编号': '市场类型'
        })
df = pd.DataFrame(rows)[columns.keys()].rename(columns=columns)
df['市场类型'] = df['市场编号'].apply(lambda x: MARKET_NUMBER_DICT.get(str(x)))
del df['市场编号']
return df
@to_numeric
def get_top10_stock_holder_info(stock_code: str,
top: int = 4) -> pd.DataFrame:
"""
获取沪深市场指定股票前十大股东信息
Parameters
----------
stock_code : str
股票代码
top : int, optional
最新 top 个前 10 大流通股东公开信息, 默认为 ``4``
Returns
-------
DataFrame
个股持仓占比前 10 的股东的一些信息
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_top10_stock_holder_info('600519',top = 1)
股票代码 更新日期 股东代码 股东名称 持股数 持股比例 增减 变动率
0 600519 2021-03-31 80010298 中国贵州茅台酒厂(集团)有限责任公司 6.783亿 54.00% 不变 --
1 600519 2021-03-31 80637337 香港中央结算有限公司 9594万 7.64% -841.1万 -8.06%
2 600519 2021-03-31 80732941 贵州省国有资本运营有限责任公司 5700万 4.54% -182.7万 -3.11%
3 600519 2021-03-31 80010302 贵州茅台酒厂集团技术开发公司 2781万 2.21% 不变 --
4 600519 2021-03-31 80475097 中央汇金资产管理有限责任公司 1079万 0.86% 不变 --
5 600519 2021-03-31 80188285 中国证券金融股份有限公司 803.9万 0.64% -91 0.00%
6 600519 2021-03-31 78043999 深圳市金汇荣盛财富管理有限公司-金汇荣盛三号私募证券投资基金 502.1万 0.40% 不变 --
7 600519 2021-03-31 70400207 中国人寿保险股份有限公司-传统-普通保险产品-005L-CT001沪 434.1万 0.35% 44.72万 11.48%
8 600519 2021-03-31 005827 中国银行股份有限公司-易方达蓝筹精选混合型证券投资基金 432万 0.34% 新进 --
9 600519 2021-03-31 78083830 珠海市瑞丰汇邦资产管理有限公司-瑞丰汇邦三号私募证券投资基金 416.1万 0.33% 不变 --
"""
def gen_fc(stock_code: str) -> str:
"""
Parameters
----------
stock_code : str
股票代码
Returns
-------
str
指定格式的字符串
"""
_type, stock_code = get_quote_id(stock_code).split('.')
_type = int(_type)
        # Shenzhen market
if _type == 0:
return f'{stock_code}02'
        # Shanghai market
return f'{stock_code}01'
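    # Worked examples (added for illustration), relying on get_quote_id returning
    # strings of the form '<market>.<code>' as used throughout this module:
    #     gen_fc('000001') -> '00000102'  (market id 0, Shenzhen)
    #     gen_fc('600519') -> '60051901'  (other market ids, Shanghai)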
def get_public_dates(stock_code: str) -> List[str]:
"""
获取指定股票公开股东信息的日期
Parameters
----------
stock_code : str
股票代码
Returns
-------
List[str]
公开日期列表
"""
quote_id = get_quote_id(stock_code)
stock_code = quote_id.split('.')[-1]
fc = gen_fc(stock_code)
data = {"fc": fc}
url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetFirstRequest2Data'
json_response = requests.post(
url, json=data).json()
dates = jsonpath(json_response, f'$..BaoGaoQi')
if not dates:
return []
return dates
fields = {
'GuDongDaiMa': '股东代码',
'GuDongMingCheng': '股东名称',
'ChiGuShu': '持股数',
'ChiGuBiLi': '持股比例',
'ZengJian': '增减',
'BianDongBiLi': '变动率',
}
quote_id = get_quote_id(stock_code)
stock_code = quote_id.split('.')[-1]
fc = gen_fc(stock_code)
dates = get_public_dates(stock_code)
dfs: List[pd.DataFrame] = []
empty_df = pd.DataFrame(columns=['股票代码', '日期']+list(fields.values()))
for date in dates[:top]:
data = {"fc": fc, "BaoGaoQi": date}
url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetShiDaLiuTongGuDong'
response = requests.post(url, json=data)
response.encoding = 'utf-8'
items: List[dict] = jsonpath(
response.json(), f'$..ShiDaLiuTongGuDongList[:]')
if not items:
continue
df = pd.DataFrame(items)
df.rename(columns=fields, inplace=True)
df.insert(0, '股票代码', [stock_code for _ in range(len(df))])
df.insert(1, '更新日期', [date for _ in range(len(df))])
del df['IsLink']
dfs.append(df)
if len(dfs) == 0:
return empty_df
return pd.concat(dfs, axis=0, ignore_index=True)
def get_all_report_dates() -> pd.DataFrame:
"""
获取沪深市场的全部股票报告期信息
Returns
-------
DataFrame
沪深市场的全部股票报告期信息
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_all_report_dates()
报告日期 季报名称
0 2021-06-30 2021年 半年报
1 2021-03-31 2021年 一季报
2 2020-12-31 2020年 年报
3 2020-09-30 2020年 三季报
4 2020-06-30 2020年 半年报
5 2020-03-31 2020年 一季报
6 2019-12-31 2019年 年报
7 2019-09-30 2019年 三季报
8 2019-06-30 2019年 半年报
9 2019-03-31 2019年 一季报
10 2018-12-31 2018年 年报
11 2018-09-30 2018年 三季报
12 2018-06-30 2018年 半年报
13 2018-03-31 2018年 一季报
14 2017-12-31 2017年 年报
15 2017-09-30 2017年 三季报
16 2017-06-30 2017年 半年报
17 2017-03-31 2017年 一季报
18 2016-12-31 2016年 年报
19 2016-09-30 2016年 三季报
20 2016-06-30 2016年 半年报
21 2016-03-31 2016年 一季报
22 2015-12-31 2015年 年报
24 2015-06-30 2015年 半年报
25 2015-03-31 2015年 一季报
26 2014-12-31 2014年 年报
27 2014-09-30 2014年 三季报
28 2014-06-30 2014年 半年报
29 2014-03-31 2014年 一季报
30 2013-12-31 2013年 年报
31 2013-09-30 2013年 三季报
32 2013-06-30 2013年 半年报
33 2013-03-31 2013年 一季报
34 2012-12-31 2012年 年报
35 2012-09-30 2012年 三季报
36 2012-06-30 2012年 半年报
37 2012-03-31 2012年 一季报
38 2011-12-31 2011年 年报
39 2011-09-30 2011年 三季报
"""
fields = {
'REPORT_DATE': '报告日期',
'DATATYPE': '季报名称'
}
params = (
('type', 'RPT_LICO_FN_CPD_BBBQ'),
('sty', ','.join(fields.keys())),
('p', '1'),
('ps', '2000'),
)
url = 'https://datacenter.eastmoney.com/securities/api/data/get'
response = requests.get(
url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
        return pd.DataFrame(columns=fields.values())
df = pd.DataFrame(items)
df = df.rename(columns=fields)
df['报告日期'] = df['报告日期'].apply(lambda x: x.split()[0])
return df
@to_numeric
def get_all_company_performance(date: str = None) -> pd.DataFrame:
"""
获取沪深市场股票某一季度的表现情况
Parameters
----------
date : str, optional
报告发布日期 部分可选示例如下(默认为 ``None``)
- ``None`` : 最新季报
- ``'2021-06-30'`` : 2021 年 Q2 季度报
- ``'2021-03-31'`` : 2021 年 Q1 季度报
Returns
-------
DataFrame
获取沪深市场股票某一季度的表现情况
Examples
---------
>>> import efinance as ef
    >>> # Fetch the latest quarterly performance
>>> ef.stock.get_all_company_performance()
股票代码 股票简称 公告日期 营业收入 营业收入同比增长 营业收入季度环比 净利润 净利润同比增长 净利润季度环比 每股收益 每股净资产 净资产收益率 销售毛利率 每股经营现金流量
0 688981 中芯国际 2021-08-28 00:00:00 1.609039e+10 22.253453 20.6593 5.241321e+09 278.100000 307.8042 0.6600 11.949525 5.20 26.665642 1.182556
1 688819 天能股份 2021-08-28 00:00:00 1.625468e+10 9.343279 23.9092 6.719446e+08 -14.890000 -36.8779 0.7100 11.902912 6.15 17.323263 -1.562187
2 688789 宏华数科 2021-08-28 00:00:00 4.555604e+08 56.418441 6.5505 1.076986e+08 49.360000 -7.3013 1.8900 14.926761 13.51 43.011243 1.421272
3 688681 科汇股份 2021-08-28 00:00:00 1.503343e+08 17.706987 121.9407 1.664509e+07 -13.100000 383.3331 0.2100 5.232517 4.84 47.455511 -0.232395
4 688670 金迪克 2021-08-28 00:00:00 3.209423e+07 -63.282413 -93.1788 -2.330505e+07 -242.275001 -240.1554 -0.3500 3.332254 -10.10 85.308531 1.050348
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
3720 600131 国网信通 2021-07-16 00:00:00 2.880378e+09 6.787087 69.5794 2.171389e+08 29.570000 296.2051 0.1800 4.063260 4.57 19.137437 -0.798689
3721 600644 乐山电力 2021-07-15 00:00:00 1.257030e+09 18.079648 5.7300 8.379727e+07 -14.300000 25.0007 0.1556 3.112413 5.13 23.645137 0.200906
3722 002261 拓维信息 2021-07-15 00:00:00 8.901777e+08 47.505282 24.0732 6.071063e+07 68.320000 30.0596 0.0550 2.351598 2.37 37.047968 -0.131873
3723 601952 苏垦农发 2021-07-13 00:00:00 4.544138e+09 11.754570 47.8758 3.288132e+08 1.460000 83.1486 0.2400 3.888046 6.05 15.491684 -0.173772
3724 601568 北元集团 2021-07-09 00:00:00 6.031506e+09 32.543303 30.6352 1.167989e+09 61.050000 40.8165 0.3200 3.541533 9.01 27.879243 0.389860
    >>> # Fetch quarterly performance for a specified report date
>>> ef.stock.get_all_company_performance('2020-03-31')
股票代码 股票简称 公告日期 营业收入 营业收入同比增长 营业收入季度环比 净利润 净利润同比增长 净利润季度环比 每股收益 每股净资产 净资产收益率 销售毛利率 每股经营现金流量
0 605033 美邦股份 2021-08-25 00:00:00 2.178208e+08 NaN NaN 4.319814e+07 NaN NaN 0.4300 NaN NaN 37.250416 NaN
1 301048 金鹰重工 2021-07-30 00:00:00 9.165528e+07 NaN NaN -2.189989e+07 NaN NaN NaN NaN -1.91 20.227118 NaN
2 001213 中铁特货 2021-07-29 00:00:00 1.343454e+09 NaN NaN -3.753634e+07 NaN NaN -0.0100 NaN NaN -1.400708 NaN
3 605588 冠石科技 2021-07-28 00:00:00 1.960175e+08 NaN NaN 1.906751e+07 NaN NaN 0.3500 NaN NaN 16.324650 NaN
4 688798 艾为电子 2021-07-27 00:00:00 2.469943e+08 NaN NaN 2.707568e+07 NaN NaN 0.3300 NaN 8.16 33.641934 NaN
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
4440 603186 华正新材 2020-04-09 00:00:00 4.117502e+08 -6.844813 -23.2633 1.763252e+07 18.870055 -26.3345 0.1400 5.878423 2.35 18.861255 0.094249
4441 002838 道恩股份 2020-04-09 00:00:00 6.191659e+08 -8.019810 -16.5445 6.939886e+07 91.601624 76.7419 0.1700 2.840665 6.20 22.575224 0.186421
4442 600396 金山股份 2020-04-08 00:00:00 2.023133e+09 0.518504 -3.0629 1.878432e+08 114.304022 61.2733 0.1275 1.511012 8.81 21.422393 0.085698
4443 002913 奥士康 2020-04-08 00:00:00 4.898977e+08 -3.883035 -23.2268 2.524717e+07 -47.239162 -58.8136 0.1700 16.666749 1.03 22.470020 0.552624
4444 002007 华兰生物 2020-04-08 00:00:00 6.775414e+08 -2.622289 -36.1714 2.472864e+08 -4.708821 -22.6345 0.1354 4.842456 3.71 61.408522 0.068341
Notes
-----
    When an invalid date is given, the list of available dates is printed.
    The available dates can also be obtained via ``efinance.stock.get_all_report_dates``.
"""
    # TODO: speed this up
fields = {
'SECURITY_CODE': '股票代码',
'SECURITY_NAME_ABBR': '股票简称',
'NOTICE_DATE': '公告日期',
'TOTAL_OPERATE_INCOME': '营业收入',
'YSTZ': '营业收入同比增长',
'YSHZ': '营业收入季度环比',
'PARENT_NETPROFIT': '净利润',
'SJLTZ': '净利润同比增长',
'SJLHZ': '净利润季度环比',
'BASIC_EPS': '每股收益',
'BPS': '每股净资产',
'WEIGHTAVG_ROE': '净资产收益率',
'XSMLL': '销售毛利率',
'MGJYXJJE': '每股经营现金流量'
# 'ISNEW':'是否最新'
}
dates = get_all_report_dates()['报告日期'].to_list()
if date is None:
date = dates[0]
if date not in dates:
rich.print('日期输入有误,可选日期如下:')
rich.print(dates)
return pd.DataFrame(columns=fields.values())
date = f"(REPORTDATE=\'{date}\')"
page = 1
dfs: List[pd.DataFrame] = []
while 1:
params = (
('st', 'NOTICE_DATE,SECURITY_CODE'),
('sr', '-1,-1'),
('ps', '500'),
('p', f'{page}'),
('type', 'RPT_LICO_FN_CPD'),
('sty', 'ALL'),
('token', '894050c76af8597a853f5b408b759f5d'),
            # ! Only Shanghai/Shenzhen A-shares
('filter',
f'(SECURITY_TYPE_CODE in ("058001001","058001008")){date}'),
)
url = 'http://datacenter-web.eastmoney.com/api/data/get'
response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
df = pd.DataFrame(items)
dfs.append(df)
page += 1
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
df = pd.concat(dfs, axis=0, ignore_index=True)
df = df.rename(columns=fields)[fields.values()]
return df
@to_numeric
def get_latest_holder_number(date: str = None) -> pd.DataFrame:
"""
获取沪深A股市场最新公开的股东数目变化情况 也可获取指定报告期的股东数目变化情况
Parameters
----------
date : str, optional
报告期日期 部分可选示例如下
- ``None`` 最新的报告期
- ``'2021-06-30'`` 2021年中报
- ``'2021-03-31'`` 2021年一季报
Returns
-------
DataFrame
沪深A股市场最新公开的或指定报告期的股东数目变化情况
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_latest_holder_number()
股票代码 股票名称 股东人数 股东人数增减 较上期变化百分比 股东户数统计截止日 户均持股市值 户均持股数量 总市值 总股本 公告日期
0 301029 怡合达 12021 -1.636527 -200.0 2021-09-30 00:00:00 2.790187e+06 33275.933783 3.354084e+10 400010000 2021-10-09 00:00:00
1 301006 迈拓股份 10964 -0.463005 -51.0 2021-09-30 00:00:00 3.493433e+05 12703.392922 3.830200e+09 139280000 2021-10-09 00:00:00
2 301003 江苏博云 11642 -2.658863 -318.0 2021-09-30 00:00:00 2.613041e+05 5004.867463 3.042103e+09 58266667 2021-10-09 00:00:00
3 300851 交大思诺 12858 -2.752987 -364.0 2021-09-30 00:00:00 2.177054e+05 6761.035931 2.799255e+09 86933400 2021-10-09 00:00:00
4 300830 金现代 34535 -16.670688 -6909.0 2021-09-30 00:00:00 2.001479e+05 12454.756045 6.912109e+09 430125000 2021-10-09 00:00:00
... ... ... ... ... ... ... ... ... ... ... ...
4435 600618 氯碱化工 45372 -0.756814 -346.0 2014-06-30 00:00:00 1.227918e+05 16526.491581 5.571311e+09 749839976 2014-08-22 00:00:00
4436 601880 辽港股份 89923 -3.589540 -3348.0 2014-03-31 00:00:00 9.051553e+04 37403.111551 8.139428e+09 3363400000 2014-04-30 00:00:00
4437 600685 中船防务 52296 -4.807325 -2641.0 2014-03-11 00:00:00 1.315491e+05 8384.263691 6.879492e+09 438463454 2014-03-18 00:00:00
4438 000017 深中华A 21358 -10.800200 -2586.0 2013-06-30 00:00:00 5.943993e+04 14186.140556 1.269518e+09 302987590 2013-08-24 00:00:00
4439 601992 金隅集团 66736 -12.690355 -9700.0 2013-06-30 00:00:00 2.333339e+05 46666.785918 1.557177e+10 3114354625 2013-08-22 00:00:00
>>> ef.stock.get_latest_holder_number(date='2021-06-30')
股票代码 股票名称 股东人数 股东人数增减 较上期变化百分比 股东户数统计截止日 户均持股市值 户均持股数量 总市值 总股本 公告日期
0 688768 容知日新 24 0.000000 0.0 2021-06-30 00:00:00 NaN 1.714395e+06 NaN 41145491 2021-08-31 00:00:00
1 688669 聚石化学 8355 -11.135929 -1047.0 2021-06-30 00:00:00 3.662956e+05 1.117096e+04 3.060400e+09 93333334 2021-08-31 00:00:00
2 688613 奥精医疗 8768 -71.573999 -22077.0 2021-06-30 00:00:00 1.380627e+06 1.520681e+04 1.210533e+10 133333334 2021-08-31 00:00:00
3 688586 江航装备 20436 -5.642257 -1222.0 2021-06-30 00:00:00 5.508121e+05 1.975653e+04 1.125640e+10 403744467 2021-08-31 00:00:00
4 688559 海目星 7491 -16.460355 -1476.0 2021-06-30 00:00:00 8.071019e+05 2.669871e+04 6.046000e+09 200000000 2021-08-31 00:00:00
... ... ... ... ... ... ... ... ... ... ... ...
4292 002261 拓维信息 144793 0.931290 1336.0 2021-06-30 00:00:00 7.731589e+04 7.602349e+03 1.119480e+10 1100766874 2021-07-15 00:00:00
4293 002471 中超控股 75592 1.026409 768.0 2021-06-30 00:00:00 4.864536e+04 1.677426e+04 3.677200e+09 1268000000 2021-07-12 00:00:00
4294 600093 *ST易见 52497 -2.118099 -1136.0 2021-06-30 00:00:00 1.267904e+05 2.138117e+04 6.656114e+09 1122447500 2021-07-06 00:00:00
4295 688091 上海谊众 25 0.000000 0.0 2021-06-30 00:00:00 NaN 3.174000e+06 NaN 79350000 2021-07-02 00:00:00
4296 301053 远信工业 10 0.000000 0.0 2021-06-30 00:00:00 NaN 6.131250e+06 NaN 61312500 2021-06-30 00:00:00
"""
dfs: List[pd.DataFrame] = []
if date is not None:
date: datetime = datetime.strptime(date, '%Y-%m-%d')
year = date.year
month = date.month
if month % 3 != 0:
month -= month % 3
        # TODO: improve handling when the month is valid but the day is not the last day of that month
if month < 3:
year -= 1
            # NOTE corresponds to the last month of the previous year
month = 12
_, last_day = calendar.monthrange(year, month)
date: str = datetime.strptime(
f'{year}-{month}-{last_day}', '%Y-%m-%d').strftime('%Y-%m-%d')
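        # Worked traces of the quarter-end rounding above (added for illustration):
        #     '2021-05-20' -> month 5 -> 5 - (5 % 3) = 3 -> '2021-03-31'
        #     '2021-02-10' -> month 2 -> 2 - (2 % 3) = 0 < 3 -> previous year -> '2020-12-31'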
page = 1
fields = {
'SECURITY_CODE': '股票代码',
'SECURITY_NAME_ABBR': '股票名称',
'HOLDER_NUM': '股东人数',
'HOLDER_NUM_RATIO': '股东人数增减',
'HOLDER_NUM_CHANGE': '较上期变化百分比',
'END_DATE': '股东户数统计截止日',
'AVG_MARKET_CAP': '户均持股市值',
'AVG_HOLD_NUM': '户均持股数量',
'TOTAL_MARKET_CAP': '总市值',
'TOTAL_A_SHARES': '总股本',
'HOLD_NOTICE_DATE': '公告日期'
}
while 1:
params = [
('sortColumns', 'HOLD_NOTICE_DATE,SECURITY_CODE'),
('sortTypes', '-1,-1'),
('pageSize', '500'),
('pageNumber', page),
('columns', 'SECURITY_CODE,SECURITY_NAME_ABBR,END_DATE,INTERVAL_CHRATE,AVG_MARKET_CAP,AVG_HOLD_NUM,TOTAL_MARKET_CAP,TOTAL_A_SHARES,HOLD_NOTICE_DATE,HOLDER_NUM,PRE_HOLDER_NUM,HOLDER_NUM_CHANGE,HOLDER_NUM_RATIO,END_DATE,PRE_END_DATE'),
('quoteColumns', 'f2,f3'),
('source', 'WEB'),
('client', 'WEB'),
]
if date is not None:
                # NOTE the escaped single quotes must not be omitted
params.append(('filter', f'(END_DATE=\'{date}\')'))
params.append(('reportName', 'RPT_HOLDERNUM_DET'))
else:
params.append(('reportName', 'RPT_HOLDERNUMLATEST'))
params = tuple(params)
url = 'http://datacenter-web.eastmoney.com/api/data/v1/get'
response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
df = pd.DataFrame(items)
df = df.rename(columns=fields)[fields.values()]
page += 1
dfs.append(df)
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
df = pd.concat(dfs, ignore_index=True)
return df
@to_numeric
@retry(tries=3)
def get_daily_billboard(start_date: str = None,
end_date: str = None) -> pd.DataFrame:
"""
获取指定日期区间的龙虎榜详情数据
Parameters
----------
start_date : str, optional
开始日期
部分可选示例如下
- ``None`` 最新一个榜单公开日(默认值)
- ``"2021-08-27"`` 2021年8月27日
end_date : str, optional
结束日期
部分可选示例如下
- ``None`` 最新一个榜单公开日(默认值)
- ``"2021-08-31"`` 2021年8月31日
Returns
-------
DataFrame
龙虎榜详情数据
Examples
--------
>>> import efinance as ef
    >>> # Fetch the most recently published billboard data (a date-range example follows below)
>>> ef.stock.get_daily_billboard()
股票代码 股票名称 上榜日期 解读 收盘价 涨跌幅 换手率 龙虎榜净买额 龙虎榜买入额 龙虎榜卖出额 龙虎榜成交额 市场总成交额 净买额占总成交比 成交额占总成交比 流通市值 上榜原因
0 000608 阳光股份 2021-08-27 卖一主卖,成功率48.36% 3.73 -9.9034 3.8430 -8.709942e+06 1.422786e+07 2.293780e+07 3.716565e+07 110838793 -7.858208 33.531268 2.796761e+09 日跌幅偏离值达到7%的前5只证券
1 000751 锌业股份 2021-08-27 主力做T,成功率18.84% 5.32 -2.9197 19.6505 -1.079219e+08 5.638899e+07 1.643109e+08 2.206999e+08 1462953973 -7.376984 15.085906 7.500502e+09 日振幅值达到15%的前5只证券
2 000762 西藏矿业 2021-08-27 北京资金买入,成功率39.42% 63.99 1.0741 15.6463 2.938758e+07 4.675541e+08 4.381665e+08 9.057206e+08 4959962598 0.592496 18.260633 3.332571e+10 日振幅值达到15%的前5只证券
3 000833 粤桂股份 2021-08-27 实力游资买入,成功率44.55% 8.87 10.0496 8.8263 4.993555e+07 1.292967e+08 7.936120e+07 2.086580e+08 895910429 5.573721 23.290046 3.353614e+09 连续三个交易日内,涨幅偏离值累计达到20%的证券
4 001208 华菱线缆 2021-08-27 1家机构买入,成功率40.43% 19.72 4.3386 46.1985 4.055258e+07 1.537821e+08 1.132295e+08 2.670117e+08 1203913048 3.368398 22.178651 2.634710e+09 日换手率达到20%的前5只证券
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
70 688558 国盛智科 2021-08-27 买一主买,成功率38.71% 60.72 1.6064 34.0104 1.835494e+07 1.057779e+08 8.742293e+07 1.932008e+08 802569300 2.287023 24.072789 2.321743e+09 有价格涨跌幅限制的日换手率达到30%的前五只证券
71 688596 正帆科技 2021-08-27 1家机构买入,成功率57.67% 26.72 3.1660 3.9065 -1.371039e+07 8.409046e+07 9.780085e+07 1.818913e+08 745137400 -1.839982 24.410438 4.630550e+09 有价格涨跌幅限制的连续3个交易日内收盘价格涨幅偏离值累计达到30%的证券
72 688663 新风光 2021-08-27 卖一主卖,成功率37.18% 28.17 -17.6316 32.2409 1.036460e+07 5.416901e+07 4.380440e+07 9.797341e+07 274732700 3.772613 35.661358 8.492507e+08 有价格涨跌幅限制的日收盘价格跌幅达到15%的前五只证券
73 688663 新风光 2021-08-27 卖一主卖,成功率37.18% 28.17 -17.6316 32.2409 1.036460e+07 5.416901e+07 4.380440e+07 9.797341e+07 274732700 3.772613 35.661358 8.492507e+08 有价格涨跌幅限制的日换手率达到30%的前五只证券
74 688667 菱电电控 2021-08-27 1家机构卖出,成功率49.69% 123.37 -18.8996 17.7701 -2.079877e+06 4.611216e+07 4.819204e+07 9.430420e+07 268503400 -0.774618 35.122163 1.461225e+09 有价格涨跌幅限制的日收盘价格跌幅达到15%的前五只证券
    >>> # Get billboard data for a given date range
    >>> start_date = '2021-08-20' # start date
    >>> end_date = '2021-08-27' # end date
>>> ef.stock.get_daily_billboard(start_date = start_date,end_date = end_date)
股票代码 股票名称 上榜日期 解读 收盘价 涨跌幅 换手率 龙虎榜净买额 龙虎榜买入额 龙虎榜卖出额 龙虎榜成交额 市场总成交额 净买额占总成交比 成交额占总成交比 流通市值 上榜原因
0 000608 阳光股份 2021-08-27 卖一主卖,成功率48.36% 3.73 -9.9034 3.8430 -8.709942e+06 1.422786e+07 2.293780e+07 3.716565e+07 110838793 -7.858208 33.531268 2.796761e+09 日跌幅偏离值达到7%的前5只证券
1 000751 锌业股份 2021-08-27 主力做T,成功率18.84% 5.32 -2.9197 19.6505 -1.079219e+08 5.638899e+07 1.643109e+08 2.206999e+08 1462953973 -7.376984 15.085906 7.500502e+09 日振幅值达到15%的前5只证券
2 000762 西藏矿业 2021-08-27 北京资金买入,成功率39.42% 63.99 1.0741 15.6463 2.938758e+07 4.675541e+08 4.381665e+08 9.057206e+08 4959962598 0.592496 18.260633 3.332571e+10 日振幅值达到15%的前5只证券
3 000833 粤桂股份 2021-08-27 实力游资买入,成功率44.55% 8.87 10.0496 8.8263 4.993555e+07 1.292967e+08 7.936120e+07 2.086580e+08 895910429 5.573721 23.290046 3.353614e+09 连续三个交易日内,涨幅偏离值累计达到20%的证券
4 001208 华菱线缆 2021-08-27 1家机构买入,成功率40.43% 19.72 4.3386 46.1985 4.055258e+07 1.537821e+08 1.132295e+08 2.670117e+08 1203913048 3.368398 22.178651 2.634710e+09 日换手率达到20%的前5只证券
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
414 605580 恒盛能源 2021-08-20 买一主买,成功率33.33% 13.28 10.0249 0.4086 2.413149e+06 2.713051e+06 2.999022e+05 3.012953e+06 2713051 88.945937 111.054054 6.640000e+08 有价格涨跌幅限制的日收盘价格涨幅偏离值达到7%的前三只证券
415 688029 南微医学 2021-08-20 4家机构卖出,成功率55.82% 204.61 -18.5340 8.1809 -1.412053e+08 1.883342e+08 3.295394e+08 5.178736e+08 762045800 -18.529760 67.958326 9.001510e+09 有价格涨跌幅限制的日收盘价格跌幅达到15%的前五只证券
416 688408 中信博 2021-08-20 4家机构卖出,成功率47.86% 179.98 -0.0666 15.3723 -4.336304e+07 3.750919e+08 4.184550e+08 7.935469e+08 846547400 -5.122340 93.739221 5.695886e+09 有价格涨跌幅限制的日价格振幅达到30%的前五只证券
417 688556 高测股份 2021-08-20 上海资金买入,成功率60.21% 51.97 17.0495 10.6452 -3.940045e+07 1.642095e+08 2.036099e+08 3.678194e+08 575411600 -6.847351 63.922831 5.739089e+09 有价格涨跌幅限制的日收盘价格涨幅达到15%的前五只证券
418 688636 智明达 2021-08-20 2家机构买入,成功率47.37% 161.90 15.8332 11.9578 2.922406e+07 6.598126e+07 3.675721e+07 1.027385e+08 188330100 15.517464 54.552336 1.647410e+09 有价格涨跌幅限制的日收盘价格涨幅达到15%的前五只证券
"""
today = datetime.today().date()
mode = 'auto'
if start_date is None:
start_date = today
if end_date is None:
end_date = today
if isinstance(start_date, str):
mode = 'user'
start_date = datetime.strptime(start_date, '%Y-%m-%d')
if isinstance(end_date, str):
mode = 'user'
end_date = datetime.strptime(end_date, '%Y-%m-%d')
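    # mode semantics: 'auto' (no dates supplied) walks both dates back one day
    # at a time until a published billboard is found; 'user' queries exactly
    # the caller-supplied range and does not retry on an empty result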
fields = EASTMONEY_STOCK_DAILY_BILL_BOARD_FIELDS
bar: tqdm = None
while 1:
dfs: List[pd.DataFrame] = []
page = 1
while 1:
params = (
('sortColumns', 'TRADE_DATE,SECURITY_CODE'),
('sortTypes', '-1,1'),
('pageSize', '500'),
('pageNumber', page),
('reportName', 'RPT_DAILYBILLBOARD_DETAILS'),
('columns', 'ALL'),
('source', 'WEB'),
('client', 'WEB'),
('filter',
f"(TRADE_DATE<='{end_date}')(TRADE_DATE>='{start_date}')"),
)
url = 'http://datacenter-web.eastmoney.com/api/data/v1/get'
response = session.get(url, params=params)
if bar is None:
pages = jsonpath(response.json(), '$..pages')
if pages and pages[0] != 1:
total = pages[0]
bar = tqdm(total=int(total))
if bar is not None:
bar.update()
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
page += 1
df = pd.DataFrame(items).rename(columns=fields)[fields.values()]
dfs.append(df)
if mode == 'user':
break
if len(dfs) == 0:
start_date = start_date-timedelta(1)
end_date = end_date-timedelta(1)
if len(dfs) > 0:
break
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
df = pd.concat(dfs, ignore_index=True)
df['上榜日期'] = df['上榜日期'].astype('str').apply(lambda x: x.split(' ')[0])
return df
def get_members(index_code: str) -> pd.DataFrame:
"""
    Fetch index constituent information
Parameters
----------
index_code : str
        Index name or index code
Returns
-------
DataFrame
        Index constituent data
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_members('000300')
指数代码 指数名称 股票代码 股票名称 股票权重
0 000300 沪深300 600519 贵州茅台 4.77
1 000300 沪深300 601398 工商银行 3.46
2 000300 沪深300 601939 建设银行 3.12
3 000300 沪深300 600036 招商银行 2.65
4 000300 沪深300 601857 中国石油 2.37
.. ... ... ... ... ...
295 000300 沪深300 688126 沪硅产业 NaN
296 000300 沪深300 688169 石头科技 NaN
297 000300 沪深300 688036 传音控股 NaN
298 000300 沪深300 688009 中国通号 NaN
299 000300 沪深300 688008 澜起科技 NaN
>>> ef.stock.get_members('中证白酒')
指数代码 指数名称 股票代码 股票名称 股票权重
0 399997 中证白酒 600519 贵州茅台 49.25
1 399997 中证白酒 000858 五粮液 18.88
2 399997 中证白酒 600809 山西汾酒 8.45
3 399997 中证白酒 000568 泸州老窖 7.03
4 399997 中证白酒 002304 洋河股份 5.72
5 399997 中证白酒 000596 古井贡酒 2.76
6 399997 中证白酒 000799 酒鬼酒 1.77
7 399997 中证白酒 600779 水井坊 1.36
8 399997 中证白酒 603369 今世缘 1.26
9 399997 中证白酒 603198 迎驾贡酒 0.89
10 399997 中证白酒 603589 口子窖 0.67
11 399997 中证白酒 000860 顺鑫农业 0.59
12 399997 中证白酒 600559 老白干酒 0.44
13 399997 中证白酒 603919 金徽酒 0.39
14 399997 中证白酒 600197 伊力特 0.28
15 399997 中证白酒 600199 金种子酒 0.26
"""
fields = {
'IndexCode': '指数代码',
'IndexName': '指数名称',
'StockCode': '股票代码',
'StockName': '股票名称',
'MARKETCAPPCT': '股票权重'
}
qs = search_quote(index_code, count=10)
df = pd.DataFrame(columns=fields.values())
if not qs:
return df
for q in qs:
if q.security_typeName == '指数':
params = (
('IndexCode', f'{q.code}'),
('pageIndex', '1'),
('pageSize', '10000'),
('deviceid', '1234567890'),
('version', '6.9.9'),
('product', 'EFund'),
('plat', 'Iphone'),
('ServerVersion', '6.9.9'),
)
url = 'https://fundztapi.eastmoney.com/FundSpecialApiNew/FundSpecialZSB30ZSCFG'
json_response = requests.get(
url,
params=params,
headers=EASTMONEY_REQUEST_HEADERS).json()
items = json_response['Datas']
            # NOTE: skip indices that rank high in the search results but whose constituents cannot be fetched, e.g. 980031, which appears first when searching 白酒
if not items:
continue
df: pd.DataFrame = pd.DataFrame(items).rename(
columns=fields)[fields.values()]
df['股票权重'] = pd.to_numeric(df['股票权重'], errors='coerce')
return df
return df
def get_latest_ipo_info() -> pd.DataFrame:
"""
    Fetch companies' IPO review status
Returns
-------
DataFrame
        IPO review status data
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_latest_ipo_info()
发行人全称 审核状态 注册地 证监会行业 保荐机构 会计师事务所 更新日期 受理日期 拟上市地点
0 郑州众智科技股份有限公司 已问询 河南 电气机械和器材制造业 民生证券股份有限公司 信永中和会计师事务所(特殊普通合伙) 2021-10-09 00:00:00 2021-06-24 00:00:00 创业板
1 成都盛帮密封件股份有限公司 已问询 四川 橡胶和塑料制品业 国金证券股份有限公司 中审众环会计师事务所(特殊普通合伙) 2021-10-09 00:00:00 2020-12-08 00:00:00 创业板
2 恒勃控股股份有限公司 已问询 浙江 汽车制造业 中信建投证券股份有限公司 中汇会计师事务所(特殊普通合伙) 2021-10-08 00:00:00 2021-09-06 00:00:00 创业板
3 深圳英集芯科技股份有限公司 已问询 广东 计算机、通信和其他电子设备制造业 华泰联合证券有限责任公司 容诚会计师事务所(特殊普通合伙) 2021-10-08 00:00:00 2021-06-10 00:00:00 科创板
4 苏州长光华芯光电技术股份有限公司 上市委会议通过 江苏 计算机、通信和其他电子设备制造业 华泰联合证券有限责任公司 天衡会计师事务所(特殊普通合伙) 2021-10-08 00:00:00 2021-06-24 00:00:00 科创板
... ... ... .. ... ... ... ... ... ...
1376 澜起科技股份有限公司 注册生效 上海 计算机、通信和其他电子设备制造业 中信证券股份有限公司 瑞华会计师事务所(特殊普通合伙) 2019-06-26 00:00:00 2019-04-01 00:00:00 科创板
1377 浙江杭可科技股份有限公司 注册生效 浙江 专用设备制造业 国信证券股份有限公司 天健会计师事务所(特殊普通合伙) 2019-06-24 00:00:00 2019-04-15 00:00:00 科创板
1378 苏州天准科技股份有限公司 注册生效 江苏 专用设备制造业 海通证券股份有限公司 瑞华会计师事务所(特殊普通合伙) 2019-06-20 00:00:00 2019-04-02 00:00:00 科创板
1379 烟台睿创微纳技术股份有限公司 注册生效 山东 计算机、通信和其他电子设备制造业 中信证券股份有限公司 信永中和会计师事务所(特殊普通合伙) 2019-06-18 00:00:00 2019-03-22 00:00:00 科创板
1380 苏州华兴源创科技股份有限公司 注册生效 江苏 专用设备制造业 华泰联合证券有限责任公司 华普天健会计师事务所(特殊普通合伙) 2019-06-18 00:00:00 2019-03-27 00:00:00 科创板
"""
fields = {
# 'ORG_CODE':'发行人代码',
'ISSUER_NAME': '发行人全称',
'CHECK_STATUS': '审核状态',
'REG_ADDRESS': '注册地',
'CSRC_INDUSTRY': '证监会行业',
'RECOMMEND_ORG': '保荐机构',
'ACCOUNT_FIRM': '会计师事务所',
'UPDATE_DATE': '更新日期',
'ACCEPT_DATE': '受理日期',
'TOLIST_MARKET': '拟上市地点'
}
df = pd.DataFrame(columns=fields.values())
dfs: List[pd.DataFrame] = []
page = 1
while 1:
params = (
('st', 'UPDATE_DATE,SECURITY_CODE'),
('sr', '-1,-1'),
('ps', '500'),
('p', page),
('type', 'RPT_REGISTERED_INFO'),
('sty', 'ORG_CODE,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE'),
('token', '894050c76af8597a853f5b408b759f5d'),
('client', 'WEB'),
)
url = 'http://datacenter-web.eastmoney.com/api/data/get'
json_response = requests.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
items = jsonpath(json_response, '$..data[:]')
if not items:
break
page += 1
df = pd.DataFrame(items).rename(
columns=fields)[fields.values()]
dfs.append(df)
if len(dfs) == 0:
return df
df = pd.concat(dfs, ignore_index=True, axis=0)
return df
@retry(tries=3)
def get_quote_snapshot(stock_code: str) -> pd.Series:
"""
    Fetch the latest quote snapshot of a Shanghai/Shenzhen market stock
Parameters
----------
stock_code : str
        Stock code
Returns
-------
Series
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_quote_snapshot('600519')
代码 600519
名称 贵州茅台
时间 15:59:30
涨跌额 -73.5
涨跌幅 -4.13
最新价 1707.0
昨收 1780.5
今开 1760.2
开盘 1760.2
最高 1768.0
最低 1703.8
均价 1726.65
涨停价 1958.55
跌停价 1602.45
换手率 0.39
成交量 49156
成交额 8487507456
卖1价 1708.0
卖2价 1708.75
卖4价 1709.6
卖5价 1709.63
买1价 1707.0
买2价 1706.99
买3价 1706.88
买4价 1706.87
买5价 1706.86
卖1数量 3.0
卖2数量 2.0
卖3数量 39.0
卖4数量 3.0
卖5数量 1.0
买1数量 17.0
买2数量 8.0
买3数量 10.0
买4数量 8.0
买5数量 21.0
dtype: object
"""
params = (
('id', stock_code),
('callback', 'jQuery183026310160411569883_1646052793441'),
)
response = requests.get(
'https://hsmarketwg.eastmoney.com/api/SHSZQuoteSnapshot', params=params)
start_index = response.text.find('{')
end_index = response.text.rfind('}')
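    # The endpoint returns JSONP (a JS callback wrapped around a JSON object),
    # so the JSON payload is recovered by slicing between the first '{' and
    # the last '}' of the response text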
columns = {
'code': '代码',
'name': '名称',
'time': '时间',
'zd': '涨跌额',
'zdf': '涨跌幅',
'currentPrice': '最新价',
'yesClosePrice': '昨收',
'openPrice': '今开',
'open': '开盘',
'high': '最高',
'low': '最低',
'avg': '均价',
'topprice': '涨停价',
'bottomprice': '跌停价',
'turnover': '换手率',
'volume': '成交量',
'amount': '成交额',
'sale1': '卖1价',
'sale2': '卖2价',
'sale3': '卖3价',
'sale4': '卖4价',
'sale5': '卖5价',
'buy1': '买1价',
'buy2': '买2价',
'buy3': '买3价',
'buy4': '买4价',
'buy5': '买5价',
'sale1_count': '卖1数量',
'sale2_count': '卖2数量',
'sale3_count': '卖3数量',
'sale4_count': '卖4数量',
'sale5_count': '卖5数量',
'buy1_count': '买1数量',
'buy2_count': '买2数量',
'buy3_count': '买3数量',
'buy4_count': '买4数量',
'buy5_count': '买5数量',
}
s = pd.Series(index=columns.values(), dtype='object')
try:
qd: dict = json.loads(response.text[start_index:end_index+1])
except:
return s
if not qd.get('fivequote'):
return s
d = {**qd.pop('fivequote'), **qd.pop('realtimequote'), **qd}
s = pd.Series(d).rename(index=columns)[columns.values()]
str_type_list = ['代码', '名称', '时间']
all_type_list = columns.values()
for column in (set(all_type_list)-set(str_type_list)):
s[column] = to_type(float, str(s[column]).strip('%'), np.nan)
return s
| 42.951184
| 245
| 0.529391
|
c0cf66ec52a04f18062ad91f6fe257d0b452a650
| 4,595
|
py
|
Python
|
examples/document_conversion_v1.py
|
wkddnjset/K-Global-Starthon
|
081db0127d23c0039f1a3fea73ee4af3bb176bbf
|
[
"Apache-2.0"
] | null | null | null |
examples/document_conversion_v1.py
|
wkddnjset/K-Global-Starthon
|
081db0127d23c0039f1a3fea73ee4af3bb176bbf
|
[
"Apache-2.0"
] | 2
|
2020-02-12T00:05:29.000Z
|
2020-06-05T17:51:24.000Z
|
examples/document_conversion_v1.py
|
wkddnjset/K-Global-Starthon
|
081db0127d23c0039f1a3fea73ee4af3bb176bbf
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import json
from os.path import join, dirname
from io import open
from watson_developer_cloud import DocumentConversionV1
'''
{
"url" : "https://gateway.aibril-watson.kr/document-conversion/api",
"username" : "61d34a43-4105-42a0-9ec8-01774c3ce1a6",
"password" : "ydCaaRag3Um5"
}
'''
document_conversion = DocumentConversionV1(
username='61d34a43-4105-42a0-9ec8-01774c3ce1a6',
password='ydCaaRag3Um5',
version='2016-02-09')
# Example of retrieving html or plain text
# with open(join(dirname(__file__), '../resources/thesis/sample.pdf'),
# encoding='ascii') as document:
# config = {'conversion_target': DocumentConversionV1.NORMALIZED_HTML}
# print(document_conversion.convert_document(
# document=document, config=config, media_type='text/html').content)
# Example with JSON
with open(join(dirname(__file__), '../resources/thesis/medical.pdf'),
'rb') as document:
config = {'conversion_target': DocumentConversionV1.NORMALIZED_TEXT}
config['conversion_target'] = DocumentConversionV1.ANSWER_UNITS
print(json.dumps(
document_conversion.convert_document(document=document, config=config),
indent=2))
# # Examples of index_document API
# print(
# "########## Example of a dry run of index_document with only a document "
# "##########")
# with open(join(dirname(__file__), '../resources/example.html'),
# encoding='utf8') as document:
# config = {
# 'retrieve_and_rank': {
# 'dry_run': 'true'
# }
# }
# print(json.dumps(
# document_conversion.index_document(config=config, document=document),
# indent=2))
# print(
# "########## Example of a dry run of index_document with only metadata "
# "##########")
# config = {
# 'retrieve_and_rank': {
# 'dry_run': 'true'
# }
# }
# metadata = {
# 'metadata': [
# {'name': 'id', 'value': '12345'}
# ]
# }
# print(
# json.dumps(
# document_conversion.index_document(config=config, metadata=metadata),
# indent=2))
# print(
# "########## Example of a dry run of index_document with document and "
# "metadata "
# "##########")
# with open(join(dirname(__file__), '../resources/example.html'),
# encoding='utf8') as document:
# config = {
# 'retrieve_and_rank': {
# 'dry_run': 'true'
# }
# }
# metadata = {
# 'metadata': [
# {'name': 'id', 'value': '12345'}
# ]
# }
# print(json.dumps(
# document_conversion.index_document(config=config, document=document,
# metadata=metadata), indent=2))
# print(
# "########## Example of a dry run of index_document with document, "
# "metadata, "
# "and additional config for conversion"
# "##########")
# with open(join(dirname(__file__), '../resources/example.html'),
# encoding='utf8') as document:
# config = {
# 'convert_document': {
# 'normalized_html': {
# 'exclude_content': {"xpaths": ["//body/div"]}
# }
# },
# 'retrieve_and_rank': {
# 'dry_run': 'true'
# }
# }
# metadata = {
# 'metadata': [
# {'name': 'id', 'value': '12345'}
# ]
# }
# print(json.dumps(
# document_conversion.index_document(config=config, document=document,
# metadata=metadata), indent=2))
# print("########## Example of index_document with document, metadata (A
# service instance id, SOLR cluster id, and "
# "a SOLR collection name must be provided from the Retrieve and Rank
# service in order to index) ##########")
# with open(join(dirname(__file__), '../resources/example.html'), 'r') as
# document:
# config = {
# 'retrieve_and_rank': {
# 'dry_run': 'false',
# 'service_instance_id': 'YOUR RETRIEVE AND RANK SERVICE INSTANCE
# ID',
# 'cluster_id': 'YOUR RETRIEVE AND RANK SERVICE SOLR CLUSTER ID',
# 'search_collection': 'YOUR RETRIEVE AND RANK SERVICE SOLR
# SEARCH COLLECTION NAME'
# }
# }
# metadata = {
# 'metadata': [
# {'name': 'id', 'value': '12345'}
# ]
# }
# print(json.dumps(document_conversion.index_document(config=config,
# document=document,
# metadata=metadata),
# indent=2))
| 33.057554
| 79
| 0.558651
|
3bbc5a00164415b31ed94573a839e6472f49dc7f
| 403
|
py
|
Python
|
statzcw/zmode.py
|
ZCW-Data1dot2/python-basic-stats-amanda-wink
|
b3db8db0e30feda598dea0fc77204420d1bbeafd
|
[
"MIT"
] | null | null | null |
statzcw/zmode.py
|
ZCW-Data1dot2/python-basic-stats-amanda-wink
|
b3db8db0e30feda598dea0fc77204420d1bbeafd
|
[
"MIT"
] | null | null | null |
statzcw/zmode.py
|
ZCW-Data1dot2/python-basic-stats-amanda-wink
|
b3db8db0e30feda598dea0fc77204420d1bbeafd
|
[
"MIT"
] | null | null | null |
def mode(list_in):
"""
Calculate the mode
:param list_in: A list
:return: float
"""
value = []
count = []
for val in list_in:
if val in value:
ind = value.index(val)
count[ind] += 1
else:
value.append(val)
count.append(1)
v = max(count)
max_ind = count.index(v)
return float(value[max_ind])
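# Quick illustrative check (not part of the original module):
# mode([1, 2, 2, 3]) returns 2.0; on a tie the value that first reached the
# maximum count wins, e.g. mode([3, 3, 5, 5]) returns 3.0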
| 19.190476
| 35
| 0.496278
|
f88a9cecda3cb450f4612e7f5742399cfb57248a
| 3,817
|
py
|
Python
|
modules/song.py
|
sushantgirdhar/song-bot
|
3dd8b39d09c62340b5e344cbd463e48a957f4b90
|
[
"MIT"
] | null | null | null |
modules/song.py
|
sushantgirdhar/song-bot
|
3dd8b39d09c62340b5e344cbd463e48a957f4b90
|
[
"MIT"
] | null | null | null |
modules/song.py
|
sushantgirdhar/song-bot
|
3dd8b39d09c62340b5e344cbd463e48a957f4b90
|
[
"MIT"
] | null | null | null |
from pyrogram import Client, filters
import youtube_dl
from youtube_search import YoutubeSearch
import requests
import os
import time
from config import Config
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
ABS="Developer"
APPER="shamilhabeeb"
OWNER="Owner"
GITCLONE="sushantgirdhar.github.io"
B2="telegram.dog/sushantgirdhar"
BUTTON1="📜 More Info Here 📜"
def time_to_seconds(time):
stringt = str(time)
return sum(int(x) * 60 ** i for i, x in enumerate(reversed(stringt.split(':'))))
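# e.g. time_to_seconds('3:25') -> 205 and time_to_seconds('1:02:03') -> 3723
# (illustrative values, not taken from the original source)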
@Client.on_message(filters.command('start') & filters.private)
async def start(client, message):
await message.reply_photo(photo=Config.START_IMG, caption=Config.START_MSG.format(message.from_user.mention),
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(BUTTON1, url=GITCLONE)
],[
InlineKeyboardButton(OWNER, url=f"https://telegram.dog/{Config.OWNER}"),
InlineKeyboardButton(ABS, url=B2)
]
]
),
reply_to_message_id=message.message_id
)
@Client.on_message(filters.command(['song']))
def a(client, message):
query = ''
for i in message.command[1:]:
query += ' ' + str(i)
print(query)
m = message.reply('`Searching... Please Wait...`')
ydl_opts = {"format": "bestaudio[ext=m4a]"}
try:
results = []
count = 0
while len(results) == 0 and count < 6:
if count>0:
time.sleep(1)
results = YoutubeSearch(query, max_results=1).to_dict()
count += 1
# results = YoutubeSearch(query, max_results=1).to_dict()
try:
link = f"https://youtube.com{results[0]['url_suffix']}"
# print(results)
title = results[0]["title"]
thumbnail = results[0]["thumbnails"][0]
duration = results[0]["duration"]
views = results[0]["views"]
            ## UNCOMMENT THIS IF YOU WANT A LIMIT ON DURATION. CHANGE 1800 TO YOUR OWN PREFERRED DURATION AND EDIT THE MESSAGE (30 minutes cap) LIMIT IN SECONDS
# if time_to_seconds(duration) >= 7000: # duration limit
# m.edit("Exceeded 30mins cap")
# return
performer = f"[@mwkBoTs]"
thumb_name = f'thumb{message.message_id}.jpg'
thumb = requests.get(thumbnail, allow_redirects=True)
open(thumb_name, 'wb').write(thumb.content)
except Exception as e:
print(e)
m.edit('**👎 Nothing found Retry with another !**')
return
except Exception as e:
m.edit(
"**Enter Song Name with /song Command!**"
)
print(str(e))
return
m.edit("`Bruh... Uploading... Please Wait...`")
try:
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=False)
audio_file = ydl.prepare_filename(info_dict)
ydl.process_info(info_dict)
rep = f'🎶 <b>Title:</b> <a href="{link}">{title}</a>\n⌚ <b>Duration:</b> <code>{duration}</code>\n📻 <b>Uploaded By:</b> <a href="https://t.me/SushantGirdhar">Song Bot</a>'
secmul, dur, dur_arr = 1, 0, duration.split(':')
for i in range(len(dur_arr)-1, -1, -1):
dur += (int(dur_arr[i]) * secmul)
secmul *= 60
message.reply_audio(audio_file, caption=rep, parse_mode='HTML',quote=False, title=title, duration=dur, performer=performer, thumb=thumb_name)
m.delete()
except Exception as e:
m.edit('**An internal Error Occured, Report This !!**')
print(e)
try:
os.remove(audio_file)
os.remove(thumb_name)
except Exception as e:
print(e)
| 36.009434
| 179
| 0.59235
|
cf470a7b4c62da1895f273461a73fa83a511ab53
| 1,377
|
py
|
Python
|
settings.py
|
mattmc318/django-socketio-example
|
31d393af57966d32509176362d1cfb1a8eb53a7a
|
[
"BSD-3-Clause"
] | 23
|
2015-01-18T02:53:19.000Z
|
2021-12-04T09:12:20.000Z
|
settings.py
|
mattmc318/django-socketio-example
|
31d393af57966d32509176362d1cfb1a8eb53a7a
|
[
"BSD-3-Clause"
] | 3
|
2016-07-13T17:36:56.000Z
|
2018-03-24T21:18:08.000Z
|
settings.py
|
mattmc318/django-socketio-example
|
31d393af57966d32509176362d1cfb1a8eb53a7a
|
[
"BSD-3-Clause"
] | 16
|
2015-03-18T05:41:07.000Z
|
2021-07-08T21:20:13.000Z
|
import os
BASE_PATH = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
ADMIN_MEDIA_PREFIX = '/media/'
SECRET_KEY = 'aysxjo#0vhu3=%(49r_3xri@hv3y8tk_2s4jnhowp-9u7eo+tl'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
os.path.join(BASE_PATH, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
| 24.157895
| 65
| 0.740015
|
ed625a2737f43604845e16ca76828b40867728e6
| 3,105
|
py
|
Python
|
integration/tests/integration/tests/api/instances_states.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 1
|
2019-09-20T08:31:54.000Z
|
2019-09-20T08:31:54.000Z
|
integration/tests/integration/tests/api/instances_states.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
integration/tests/integration/tests/api/instances_states.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
GROUP = "dbaas.api.instances.status"
from proboscis import before_class
from proboscis import test
from proboscis.asserts import assert_equal
from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements
from trove.common.utils import poll_until
@test(groups=[GROUP])
class InstanceStatusTests(object):
@before_class
def set_up(self):
reqs = Requirements(is_admin=False)
self.user = CONFIG.users.find_user(reqs)
self.dbaas = create_dbaas_client(self.user)
@test
def test_create_failure_on_volume_prov_failure(self):
# Fake nova will fail a volume of size 9.
response = self.dbaas.instances.create('volume_fail', 1,
{'size': 9}, [])
poll_until(lambda: self.dbaas.instances.get(response.id),
lambda instance: instance.status == 'ERROR',
time_out=10)
instance = self.dbaas.instances.get(response.id)
print("Status: %s" % instance.status)
assert_equal(instance.status, "ERROR",
"Instance did not drop to error after volume prov failure.")
@test
def test_create_failure_on_server_failure(self):
        # Fake nova will fail a server ending with 'SERVER_ERROR'.
response = self.dbaas.instances.create('test_SERVER_ERROR', 1,
{'size': 1}, [])
poll_until(lambda: self.dbaas.instances.get(response.id),
lambda instance: instance.status == 'ERROR',
time_out=10)
instance = self.dbaas.instances.get(response.id)
print("Status: %s" % instance.status)
assert_equal(instance.status, "ERROR",
"Instance did not drop to error after server prov failure.")
###TODO(ed-): We don't at present have a way to test DNS in FAKE_MODE.
@test(enabled=False)
def test_create_failure_on_dns_failure(self):
#TODO(ed-): Throw DNS-specific monkeywrench into works
response = self.dbaas.instances.create('test_DNS_ERROR', 1,
{'size': 1}, [])
poll_until(lambda: self.dbaas.instances.get(response.id),
lambda instance: instance.status == 'ERROR',
time_out=10)
instance = self.dbaas.instances.get(response.id)
print("Status: %s" % instance.status)
assert_equal(instance.status, "ERROR",
"Instance did not drop to error after DNS prov failure.")
| 40.324675
| 78
| 0.667955
|
1fc9539d00e760ead2c87c83f83c1bdc87e9140c
| 5,816
|
py
|
Python
|
ros/src/waypoint_updater/waypoint_updater.py
|
Eudie/CarND-Capstone
|
cffb07c3a4d8f55da90f6161e161baca8a5ecd18
|
[
"MIT"
] | null | null | null |
ros/src/waypoint_updater/waypoint_updater.py
|
Eudie/CarND-Capstone
|
cffb07c3a4d8f55da90f6161e161baca8a5ecd18
|
[
"MIT"
] | null | null | null |
ros/src/waypoint_updater/waypoint_updater.py
|
Eudie/CarND-Capstone
|
cffb07c3a4d8f55da90f6161e161baca8a5ecd18
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
import math
from std_msgs.msg import Int32
from sensor_msgs.msg import PointCloud2
from scipy.spatial import KDTree
import numpy as np
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
# LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
LOOKAHEAD_WPS = 50 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 1.0
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
rospy.Subscriber('/vehicle/obstacle_points', PointCloud2, self.obstacle_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.pose = None
self.base_waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.stopline_wp_idx = -1
self.base_lane = None
self.loop()
#rospy.spin()
def loop(self):
rate = rospy.Rate(50)
while not rospy.is_shutdown():
if self.pose and self.base_waypoints and self.waypoint_tree:
# Get closest waypoint
closest_waypoint_idx = self.get_closest_waypoint_idx()
self.publish_waypoints(closest_waypoint_idx)
rate.sleep()
def get_closest_waypoint_idx(self):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        # Check if the closest waypoint is ahead of or behind the vehicle
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx - 1]
#Equation for hyperplane through closest_coords
cl_vect = np.array(closest_coord)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x, y])
val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
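        # A positive dot product means the closest waypoint lies behind the
        # car, so step forward to the next waypoint (wrapping around the track)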
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def publish_waypoints(self, closest_idx):
# lane = Lane()
# lane.header = self.base_waypoints.header
# lane.waypoints = self.base_waypoints.waypoints[closest_idx:closest_idx + LOOKAHEAD_WPS]
# self.final_waypoints_pub.publish(lane)
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
def generate_lane(self):
lane = Lane()
closest_idx = self.get_closest_waypoint_idx()
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = self.base_lane.waypoints[closest_idx:farthest_idx]
if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):
lane.waypoints = base_waypoints
else:
lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)
return lane
def decelerate_waypoints(self, waypoints, closest_idx):
temp = []
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
stop_idx = max(self.stopline_wp_idx - closest_idx -2, 0) # Two waypoints back from line so front of car at line
dist = self.distance(waypoints, i, stop_idx)
# TODO we could use another algorithm. see 12. Full Waypoint Walkthrough 7:00
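            # v = sqrt(2 * a * d) is the speed of a constant deceleration 'a'
            # that reaches exactly zero at the stop line (from v^2 = 2*a*d)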
            vel = math.sqrt(2 * MAX_DECEL * dist)
            if vel < 1.:
                vel = 0.
p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
temp.append(p)
return temp
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.base_waypoints = waypoints
self.base_lane = waypoints
rospy.loginfo("test: len(waypoints): %s", len(self.base_waypoints.waypoints))
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x , waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.stopline_wp_idx = msg.data
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
| 34.619048
| 133
| 0.66489
|
32999b7e450d436f1f61071e2de975f75dd0549f
| 1,833
|
py
|
Python
|
Integration/integrate.py
|
Abdus-Samee/CSE-218
|
154b0d8abe56bf4e11f2d86c5043f7c022baf7f2
|
[
"MIT"
] | null | null | null |
Integration/integrate.py
|
Abdus-Samee/CSE-218
|
154b0d8abe56bf4e11f2d86c5043f7c022baf7f2
|
[
"MIT"
] | null | null | null |
Integration/integrate.py
|
Abdus-Samee/CSE-218
|
154b0d8abe56bf4e11f2d86c5043f7c022baf7f2
|
[
"MIT"
] | null | null | null |
import math
def func_val(x):
return (2000*math.log(140000/(140000-2100*x)))-9.8*x
def trapezoidIntegration(n, a, b):
h = (b-a)/n
first = func_val(a)
second = func_val(b)
s = 0
for i in range(1, n):
s += func_val(a+i*h)
ans = ((b-a) * (first + second + 2*s))/(2*n)
return ans
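# The composite trapezoidal rule above computes
#   (b - a) / (2n) * (f(a) + f(b) + 2 * sum of the n-1 interior points),
# i.e. h/2 * (f(a) + f(b) + 2*sum) with step h = (b - a) / n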
def simpsonIntegration(n, a, b):
s1 = 0
s2 = 0
t = []
t.append(func_val(a))
h = (b-a)/n
i = a
for _ in range(1, n):
t.append(func_val(i+h))
i += h
t.append(func_val(b))
for i in range(1, n):
if i%2 == 0:
s2 += t[i]
else:
s1 += t[i]
ans = ((b-a)*(t[0] + t[n] + 4*s1 + 2*s2))/(3*n)
return ans
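# The composite Simpson's 1/3 rule above computes
#   (b - a) / (3n) * (f(x0) + f(xn) + 4 * sum(odd-indexed) + 2 * sum(even-indexed interior)),
# valid for even n (the code below always passes 2*n, so this holds)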
n = int(input('Enter number of sub-intervals:'))
print()
print('Trapezoidal Integration:')
print('Distance traversed by the rocket:', trapezoidIntegration(n, 8, 30))
print('{:<10} {:<10} {:^12}'.format('Segment', 'Value', 'Absolute Relative Error'))
prevAns = -1
for i in range(1, 6):
ans = trapezoidIntegration(i, 8, 30)
if i != 1:
error = abs((ans-prevAns)/ans)*100
print("{:<10} {:<10.4f} {:^10.4f}".format(i, ans, error))
else:
print("{:<10} {:<10.4f} {:^10}".format(i, ans, "--"))
prevAns = ans
print()
print('Simpson\'s 1/3 Integration:')
print('Distance traversed by the rocket:', simpsonIntegration(2*n, 8, 30))
print('{:<10} {:<10} {:^12}'.format('Segment', 'Value', 'Absolute Relative Error'))
prevAns = -1
for i in range(2, 11, 2):
ans = simpsonIntegration(i, 8, 30)
if prevAns != -1:
error = abs((ans-prevAns)/ans)*100
print("{:<10} {:<10.4f} {:^10.4f}".format(i, ans, error))
else:
print("{:<10} {:<10.4f} {:^10}".format(i, ans, "--"))
prevAns = ans
| 25.816901
| 83
| 0.509002
|
ebc11ab89f21b21b1081275014f633ed9f1dccd3
| 353
|
py
|
Python
|
django/inclusion/migrations/0011_remove_structurereport_reporter.py
|
betagouv/data-inclusion
|
3423c55cb760899abe61125966e2053d8089fec7
|
[
"MIT"
] | null | null | null |
django/inclusion/migrations/0011_remove_structurereport_reporter.py
|
betagouv/data-inclusion
|
3423c55cb760899abe61125966e2053d8089fec7
|
[
"MIT"
] | null | null | null |
django/inclusion/migrations/0011_remove_structurereport_reporter.py
|
betagouv/data-inclusion
|
3423c55cb760899abe61125966e2053d8089fec7
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-05-03 15:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("inclusion", "0010_remove_structure_code_safir_pe"),
]
operations = [
migrations.RemoveField(
model_name="structurereport",
name="reporter",
),
]
| 19.611111
| 61
| 0.620397
|
9e615c32bec20430127529276766e2b998c6651e
| 1,781
|
py
|
Python
|
entrenandoRF.py
|
Flashstacks/facial-recognition
|
93998bc17819ff8862603cd2660f2abc5c356d24
|
[
"MIT"
] | null | null | null |
entrenandoRF.py
|
Flashstacks/facial-recognition
|
93998bc17819ff8862603cd2660f2abc5c356d24
|
[
"MIT"
] | null | null | null |
entrenandoRF.py
|
Flashstacks/facial-recognition
|
93998bc17819ff8862603cd2660f2abc5c356d24
|
[
"MIT"
] | null | null | null |
import cv2
import os
import numpy as np
dataPath = 'C:/Users/carl2/Desktop/Reconocimiento/Datos' # Change this to the path where you stored the data
peopleList = os.listdir(dataPath) # List the per-person folders inside Datos
print('Lista de personas: ', peopleList) # Print the names of the person folders
labels = [] # Create the global lists
facesData = []
label = 0
for nameDir in peopleList:
    personPath = dataPath + '/' + nameDir # Read the images by looping over each person's folder path
    print('Leyendo las imágenes')
    for fileName in os.listdir(personPath):
        print('Rostros: ', nameDir + '/' + fileName) # Print the path of each image
        labels.append(label) # Append the label to the list
        facesData.append(cv2.imread(personPath+'/'+fileName,0)) # Read the image and append it to the list
#image = cv2.imread(personPath+'/'+fileName,0)
#cv2.imshow('image',image)
#cv2.waitKey(10)
    label = label + 1 # Increment the label counter
#print('labels= ',labels) # Print the label array
print('Número de etiquetas 0: ',np.count_nonzero(np.array(labels)==0)) # Print the number of labels for each class 0, 1, 2, ...
print('Número de etiquetas 1: ',np.count_nonzero(np.array(labels)==1))
# Methods for training the recognizer
#face_recognizer = cv2.face.EigenFaceRecognizer_create()
#face_recognizer = cv2.face.FisherFaceRecognizer_create()
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
# Training the face recognizer
print("Entrenando...")
face_recognizer.train(facesData, np.array(labels))
# Saving the trained model
#face_recognizer.write('modeloEigenFace.xml')
#face_recognizer.write('modeloFisherFace.xml')
face_recognizer.write('modeloLBPHFace.xml')
print("Modelo almacenado...Listo")
| 41.418605
| 129
| 0.735542
|
9c502951101e778409b546c7d866ebaaac2fe7ed
| 805
|
py
|
Python
|
var/spack/repos/builtin/packages/hydra/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-03-05T10:54:32.000Z
|
2021-03-05T14:14:52.000Z
|
var/spack/repos/builtin/packages/hydra/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32
|
2020-12-15T17:29:20.000Z
|
2022-03-21T15:08:31.000Z
|
var/spack/repos/builtin/packages/hydra/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-04-07T18:27:09.000Z
|
2022-03-31T22:52:38.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Hydra(AutotoolsPackage):
"""Hydra is a process management system for starting parallel jobs.
Hydra is designed to natively work with existing launcher daemons
(such as ssh, rsh, fork), as well as natively integrate with resource
management systems (such as slurm, pbs, sge)."""
homepage = "http://www.mpich.org"
url = "http://www.mpich.org/static/downloads/3.2/hydra-3.2.tar.gz"
list_url = "http://www.mpich.org/static/downloads/"
list_depth = 1
version('3.2', sha256='f7a67ec91a773d95cbbd479a80e926d44bee1ff9fc70a8d1df075ea53ea33889')
| 38.333333
| 93
| 0.732919
|
9460ca4fec02f7d76bc92af5d1c6d8b1ca346fc8
| 1,050
|
py
|
Python
|
Observer/property_observers.py
|
NachoCP/python-design-patterns
|
76a7834ff3f8d5c759935f3f5bd3c7c2a57ea112
|
[
"MIT"
] | null | null | null |
Observer/property_observers.py
|
NachoCP/python-design-patterns
|
76a7834ff3f8d5c759935f3f5bd3c7c2a57ea112
|
[
"MIT"
] | null | null | null |
Observer/property_observers.py
|
NachoCP/python-design-patterns
|
76a7834ff3f8d5c759935f3f5bd3c7c2a57ea112
|
[
"MIT"
] | null | null | null |
class Event(list):
def __call__(self, *args, **kwargs):
for item in self:
item(*args, **kwargs)
class PropertyObservable:
def __init__(self):
self.property_changed = Event()
class Person(PropertyObservable):
def __init__(self, age=0):
super().__init__()
self._age = age
@property
def age(self):
return self._age
@age.setter
def age(self, value):
if self._age == value:
return
self._age = value
self.property_changed('age', value)
class TrafficAuthority:
def __init__(self, person):
self.person = person
person.property_changed.append(self.person_changed)
def person_changed(self, name, value):
if name == 'age':
if value < 16:
print('Sorry, you still cannot drive')
else:
print('Okay, you can drive now')
self.person.property_changed.remove(
self.person_changed
)
if __name__ == '__main__':
p = Person()
ta = TrafficAuthority(p)
for age in range(14, 20):
print(f'Setting age to {age}')
p.age = age
| 21
| 55
| 0.634286
|
95b2c681e482c50a10df43838a3de2531293aae3
| 501
|
py
|
Python
|
app/tech_dict/tech_dict/wsgi.py
|
duoduo369/TechDict
|
82fbac72f2dcb24f12d185ea43cc7a6b9887f53b
|
[
"MIT"
] | 1
|
2016-06-29T23:55:00.000Z
|
2016-06-29T23:55:00.000Z
|
app/tech_dict/tech_dict/wsgi.py
|
duoduo369/TechDict
|
82fbac72f2dcb24f12d185ea43cc7a6b9887f53b
|
[
"MIT"
] | null | null | null |
app/tech_dict/tech_dict/wsgi.py
|
duoduo369/TechDict
|
82fbac72f2dcb24f12d185ea43cc7a6b9887f53b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
"""
WSGI config for tech_dict project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import sys
# Set the system default encoding to UTF-8
reload(sys)
sys.setdefaultencoding('utf8')
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tech_dict.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 23.857143
| 78
| 0.782435
|
fbbd61cd07ae6a3df18ee24449256507ee4a33e8
| 2,346
|
py
|
Python
|
src/typhoonae/tests/test_init.py
|
sprymak/typhoonae
|
fe31bcc7b21fc14f8aa97b36d66cd7671974543b
|
[
"Apache-2.0"
] | 1
|
2018-12-02T10:36:07.000Z
|
2018-12-02T10:36:07.000Z
|
src/typhoonae/tests/test_init.py
|
sprymak/typhoonae
|
fe31bcc7b21fc14f8aa97b36d66cd7671974543b
|
[
"Apache-2.0"
] | null | null | null |
src/typhoonae/tests/test_init.py
|
sprymak/typhoonae
|
fe31bcc7b21fc14f8aa97b36d66cd7671974543b
|
[
"Apache-2.0"
] | 1
|
2018-12-02T10:36:08.000Z
|
2018-12-02T10:36:08.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2009, 2010 Tobias Rodäbel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the runtime environment."""
import os
import re
import sys
import tempfile
import typhoonae
import unittest
class InitTestCase(unittest.TestCase):
"""Tests a number of helper functions."""
def setUp(self):
"""Loads the sample application."""
app_root = os.path.join(os.path.dirname(__file__), 'sample')
os.chdir(app_root)
sys.path.insert(0, os.getcwd())
self.conf = typhoonae.getAppConfig()
assert self.conf.application == 'sample'
def testSetupStubs(self):
"""Sets up apiproxy stubs."""
class TestOptions:
blobstore_path = 'blobstore'
datastore = 'mongodb'
http_port = 8080
internal_address = 'localhost:8770'
login_url = '/_ah/login'
logout_url = '/_ah/logout'
rdbms_sqlite_path = os.path.join(tempfile.gettempdir(), 'test.db')
server_name = 'localhost'
smtp_host = 'localhost'
smtp_port = 25
smtp_user = ''
smtp_password = ''
use_celery = False
websocket_host = 'localhost'
websocket_port = 8888
xmpp_host = 'localhost'
memcache = ''
typhoonae.setupStubs(self.conf, TestOptions())
def testInitURLMapping(self):
"""Initializes the url/script map."""
class TestOptions:
login_url = '/_ah/login'
logout_url = '/_ah/logout'
url_mapping = typhoonae.initURLMapping(self.conf, TestOptions())
for pattern, handler_path, path, login_required, admin_only in url_mapping:
if pattern.match('/foo'):
self.assertEqual(handler_path, 'app.py')
| 32.136986
| 83
| 0.631287
|
b60f1738b3a657723b66b568f27ccc89a2b212ad
| 4,614
|
py
|
Python
|
team_most_per.py
|
handshakinglemma/cwhlstatbot
|
fcb9fa8d27aafecc09845326935a2ddd18c21368
|
[
"MIT"
] | null | null | null |
team_most_per.py
|
handshakinglemma/cwhlstatbot
|
fcb9fa8d27aafecc09845326935a2ddd18c21368
|
[
"MIT"
] | null | null | null |
team_most_per.py
|
handshakinglemma/cwhlstatbot
|
fcb9fa8d27aafecc09845326935a2ddd18c21368
|
[
"MIT"
] | null | null | null |
# team_most_per
# Selects two random stats and returns the team with the most stat
# per the other stat. It recalculates if more than two teams are tied
# for most stat. Formats and returns a tweet.
import random
from decimal import *
# team_most_per
# Calculates the most stat per stat.
# INPUT: stats, player_dicts
#        The list of stats and the list of players' stat dictionaries.
# OUTPUT: most_stat, most_team, tied_teams, category1, category2
#         The highest calculated stat-per-stat value, the index of the team
#         with the most stat, a list that may contain the indices of teams
#         tied for most stat, and the two categories used to calculate the
#         stat per stat.
# (list, list) -> (float, int, list, str, str)
def team_most_per(stats, player_dicts):
# Generate two random stats categories.
category1 = stats[random.randint(0, len(stats) - 1)]
category2 = stats[random.randint(0, len(stats) - 1)]
# Re-generate if the chosen categories are the same.
if category1 == category2:
category1 = stats[random.randint(0, len(stats) - 1)]
category2 = stats[random.randint(0, len(stats) - 1)]
    # Initialize empty lists to catch stats for all teams.
stat1 = [0, 0, 0, 0, 0]
stat2 = [0, 0, 0, 0, 0]
# statX[0] -> BOS
# statX[1] -> BRA
# statX[2] -> CAL
# statX[3] -> MON
# statX[4] -> TOR
for d in player_dicts:
if d['TEAM'] == 'BOS':
stat1[0] += d[category1]
stat2[0] += d[category2]
elif d['TEAM'] == 'BRA':
stat1[1] += d[category1]
stat2[1] += d[category2]
elif d['TEAM'] == 'CAL':
stat1[2] += d[category1]
stat2[2] += d[category2]
elif d['TEAM'] == 'MON':
stat1[3] += d[category1]
stat2[3] += d[category2]
elif d['TEAM'] == 'TOR':
stat1[4] += d[category1]
stat2[4] += d[category2]
# Initialize empty variables to catch most values.
most_stat = 0
most_team = None
tied_teams = []
# Loop through all team stats.
for i in range(len(stat1)):
# Avoid division by 0.
if stat2[i] > 0:
# Calculate new stat!
stat = stat1[i] / stat2[i]
if stat > most_stat:
most_stat = stat
most_team = i
tied_teams = []
elif stat == most_stat:
tied_teams.append(i)
# If there are more than two teams tied or if there was no
# team calculated, recalculate.
    if len(tied_teams) > 1 or most_team is None:
        # TODO: write interesting output to a file
most_stat, most_team, tied_teams, category1, category2 = team_most_per(stats, player_dicts)
return most_stat, most_team, tied_teams, category1, category2
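# Illustrative example (hypothetical category names): if category1 were 'G'
# (goals) and category2 'GP' (games played), the function would return the
# team with the highest team-total goals per game, e.g. 120 G / 24 GP = 5.0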
# tweet
# Formats and returns the tweet.
# INPUT: stats, player_dicts, abbreviations
# The list of stats, the list of players' stat dictionaries, and the
# dictonary of abbreviations for the various stats.
# OUTPUT: The string of the tweet.
def tweet(stats, player_dicts, abbreviations):
# Get the data for the tweet.
stat, team, tied, stat1, stat2 = team_most_per(stats, player_dicts)
# Team abbreviations key.
teams = ['Boston Blades', 'Brampton Thunder', 'Calgary Inferno', 'Canadiennes de Montreal', 'Toronto Furies']
# Make the tweet.
# No tied teams.
if len(tied) == 0:
# If the stat has more than 5 decimal places, truncate it.
if abs(Decimal(str(stat)).as_tuple().exponent) > 5:
tweet = ('2015-16 Most ' + abbreviations[stat1] + ' per ' + abbreviations[stat2] + ':' + '\n' + 'The ' + teams[team] + ' with ' + '{:.3f}' + ' ' + stat1 + ' per ' + stat2).format(stat)
else:
tweet = '2015-16 Most ' + abbreviations[stat1] + ' per ' + abbreviations[stat2] + ':' + '\n' + 'The ' + teams[team] + ' with ' + str(stat) + ' ' + stat1 + ' per ' + stat2
# Two tied teams.
elif len(tied) == 2:
# If the stat has more than 5 decimal places, truncate it.
if abs(Decimal(str(stat)).as_tuple().exponent) > 5:
tweet = ('2015-16 Most ' + abbreviations[stat1] + ' per ' + abbreviations[stat2] + ':' + '\n' + 'The ' + teams[team] + ' and the ' + teams[tied[0]] + ' with ' + '{:.3f}' + ' ' + stat1 + ' per ' + stat2).format(stat)
else:
tweet = '2015-16 Most ' + abbreviations[stat1] + ' per ' + abbreviations[stat2] + ':' + '\n' + 'The ' + teams[team] + ' and the ' + teams[tied[0]] + ' with ' + str(stat) + ' ' + stat1 + ' per ' + stat2
return tweet
| 37.209677
| 227
| 0.587126
|
eec954a5fb777a920b3689466a40b726008164f7
| 343
|
py
|
Python
|
projects/learning-journal/brain-bit-ingestor/tests/event_hub/test_send_event.py
|
DEV3L/archive
|
652e37bf949cfcb2174b97ed5b7dbb6285a8dbe8
|
[
"Beerware"
] | null | null | null |
projects/learning-journal/brain-bit-ingestor/tests/event_hub/test_send_event.py
|
DEV3L/archive
|
652e37bf949cfcb2174b97ed5b7dbb6285a8dbe8
|
[
"Beerware"
] | null | null | null |
projects/learning-journal/brain-bit-ingestor/tests/event_hub/test_send_event.py
|
DEV3L/archive
|
652e37bf949cfcb2174b97ed5b7dbb6285a8dbe8
|
[
"Beerware"
] | null | null | null |
from azure.eventhub import EventData
from ingestor.builders.build_sender_client import build_sender_client
message = "Hello, World!"
def test_send():
expected_outcome = 'Ok'
client, sender = build_sender_client()
send_result = sender.send(EventData(message))
client.stop()
assert expected_outcome == send_result.name
| 21.4375
| 69
| 0.752187
|
79766b2042464683cf452bad747d99c7a3f5f3e0
| 97
|
py
|
Python
|
genhmm1d/__init__.py
|
mamadouyamar/GenHMM1d
|
dc76946938b53be32e26218c21a373bc6663b13a
|
[
"MIT"
] | null | null | null |
genhmm1d/__init__.py
|
mamadouyamar/GenHMM1d
|
dc76946938b53be32e26218c21a373bc6663b13a
|
[
"MIT"
] | null | null | null |
genhmm1d/__init__.py
|
mamadouyamar/GenHMM1d
|
dc76946938b53be32e26218c21a373bc6663b13a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 22 20:56:14 2020
@author: 49009427
"""
| 10.777778
| 36
| 0.525773
|
79acce0e33a3d421696697b9ff0a24aca0da26be
| 1,700
|
py
|
Python
|
pommermanLearn/params.py
|
FrankPfirmann/playground
|
9379cbfd57d98eadcaf7b00d777434490a536540
|
[
"Apache-2.0"
] | null | null | null |
pommermanLearn/params.py
|
FrankPfirmann/playground
|
9379cbfd57d98eadcaf7b00d777434490a536540
|
[
"Apache-2.0"
] | null | null | null |
pommermanLearn/params.py
|
FrankPfirmann/playground
|
9379cbfd57d98eadcaf7b00d777434490a536540
|
[
"Apache-2.0"
] | 2
|
2021-12-01T12:03:50.000Z
|
2022-03-22T16:37:34.000Z
|
from datetime import datetime
import torch
# train_dqn.py
num_iterations = 500
episodes_per_iter = 1
gradient_steps_per_iter = 100
batch_size = 128
episodes_per_eval = 50
intermediate_test = 100
centralize_planes = False
render_tests = False
env = 'PommeRadio-v2' # PommeFFACompetition-v0, OneVsOne-v0. PommeTeamCompetition-v0, PommeRadio-v2, custom-v2, custom2-v2
p_observable = True
crop_fog = False
#rainbow_dqn
double_q = True
prioritized_replay = True
device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda")
run_name = datetime.now().strftime("%Y%m%dT%H%M%S")
alpha = 0.7
beta = 0.7 # determines how replays should be weighted (beta==0 --> all weights are 1, beta==1 --> influence of replays is fully normalized)
# categorical DQN
categorical = False
atom_size = 51
v_min = -1
v_max = 1
dueling = True
#noisy layers should replace epsilon greedy exploration
noisy = True
#n-step
use_nstep = True
nsteps = 10
#train_agent.py
communicate = 2
use_memory = True
#dqn.py
seed = 1 # set to -1 to disable seeding
gamma = 0.99
tau = 0.005
lr_q = 0.0003
exploration_noise = 0.00
#data_generator.py
set_position = False
replay_size = 2**16 # must be a power of 2 to be compatible with prioritized replay
max_steps = 800
#rewards.py
reward_func = 'SkynetReward' #SkynetReward, BombReward
fifo_size = 64
kill_rwd = 0.5
teamkill_rwd = -0.5
death_rwd = -0.5
win_loss_bonus = 0.5
step_rwd = 0.001
item_rwd = 0.03
bomb_tracker = True
#models.py
memory_method = 'forgetting' # one of 'counting', 'forgetting'
forgetfullness = 0.01
normalize_steps = True
def validate():
if use_memory: assert p_observable and not crop_fog
if communicate: assert use_memory
| 22.077922
| 140
| 0.754118
|
dbead757a82e48ed720ac509837be907e1c84ca5
| 29,199
|
py
|
Python
|
src/tests/ftest/util/dmg_utils_base.py
|
zalsader/daos
|
38ec717e8380758dce844ae905719c225f8884d6
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
src/tests/ftest/util/dmg_utils_base.py
|
zalsader/daos
|
38ec717e8380758dce844ae905719c225f8884d6
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
src/tests/ftest/util/dmg_utils_base.py
|
zalsader/daos
|
38ec717e8380758dce844ae905719c225f8884d6
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
#!/usr/bin/python
"""
(C) Copyright 2020-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from socket import gethostname
from command_utils_base import \
FormattedParameter, CommandWithParameters
from command_utils import CommandWithSubCommand, YamlCommand
class DmgCommandBase(YamlCommand):
"""Defines a base object representing a dmg command."""
def __init__(self, path, yaml_cfg=None):
"""Create a dmg Command object.
Args:
path (str): path to the dmg command
yaml_cfg (DmgYamlParameters, optional): dmg config file
settings. Defaults to None, in which case settings
must be supplied as command-line parameters.
"""
super().__init__("/run/dmg/*", "dmg", path, yaml_cfg)
# If running dmg on remote hosts, this list needs to include those hosts
self.temporary_file_hosts = gethostname().split(".")[0:1]
# If specified use the configuration file from the YamlParameters object
default_yaml_file = None
if self.yaml is not None and hasattr(self.yaml, "filename"):
default_yaml_file = self.yaml.filename
self._hostlist = FormattedParameter("-l {}")
self.hostfile = FormattedParameter("-f {}")
self.configpath = FormattedParameter("-o {}", default_yaml_file)
self.insecure = FormattedParameter("-i", False)
self.debug = FormattedParameter("-d", True)
self.json = FormattedParameter("-j", False)
@property
def hostlist(self):
"""Get the hostlist that was set.
Returns a string list.
"""
if self.yaml:
hosts = self.yaml.hostlist.value
else:
hosts = self._hostlist.value.split(",")
return hosts
@hostlist.setter
def hostlist(self, hostlist):
"""Set the hostlist to be used for dmg invocation.
Args:
hostlist (string list): list of host addresses
"""
if self.yaml:
if not isinstance(hostlist, list):
hostlist = hostlist.split(",")
self.yaml.hostlist.update(hostlist, "dmg.yaml.hostlist")
else:
if isinstance(hostlist, list):
hostlist = ",".join(hostlist)
self._hostlist.update(hostlist, "dmg._hostlist")
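    # Usage sketch (hypothetical host names, for illustration only): assigning
    # either "host1,host2" or ["host1", "host2"] to dmg.hostlist is equivalent,
    # and the getter always returns the hosts as a list of strings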
def get_sub_command_class(self):
# pylint: disable=redefined-variable-type
"""Get the dmg sub command object based upon the sub-command."""
if self.sub_command.value == "network":
self.sub_command_class = self.NetworkSubCommand()
elif self.sub_command.value == "pool":
self.sub_command_class = self.PoolSubCommand()
elif self.sub_command.value == "storage":
self.sub_command_class = self.StorageSubCommand()
elif self.sub_command.value == "system":
self.sub_command_class = self.SystemSubCommand()
elif self.sub_command.value == "cont":
self.sub_command_class = self.ContSubCommand()
elif self.sub_command.value == "config":
self.sub_command_class = self.ConfigSubCommand()
elif self.sub_command.value == "telemetry":
self.sub_command_class = self.TelemetrySubCommand()
elif self.sub_command.value == "version":
self.sub_command_class = self.VersionSubCommand()
else:
self.sub_command_class = None
class ConfigSubCommand(CommandWithSubCommand):
"""Defines an object for the dmg config sub command."""
def __init__(self):
"""Create a dmg config subcommand object."""
super(DmgCommandBase.ConfigSubCommand, self).__init__(
"run/dmg/config/*", "config")
def get_sub_command_class(self):
# pylint: disable=redefined-variable-type
"""Get the dmg config sub command object."""
if self.sub_command.value == "generate":
self.sub_command_class = self.GenerateSubCommand()
else:
self.sub_command_class = None
class GenerateSubCommand(CommandWithParameters):
"""Defines an object for the dmg config generate command."""
def __init__(self):
"""Create a dmg config generate object."""
super(
DmgCommandBase.ConfigSubCommand.GenerateSubCommand,
self).__init__(
"/run/dmg/config/generate/*", "generate")
self.access_points = FormattedParameter(
"--access-points={}", None)
self.num_engines = FormattedParameter("--num-engines={}", None)
self.min_ssds = FormattedParameter("--min-ssds={}", None)
self.net_class = FormattedParameter("--net-class={}", None)
class ContSubCommand(CommandWithSubCommand):
"""Defines an object for the dmg cont sub command."""
def __init__(self):
"""Create a dmg cont subcommand object."""
super().__init__("/run/dmg/cont/*", "cont")
def get_sub_command_class(self):
# pylint: disable=redefined-variable-type
"""Get the dmg cont sub command object."""
if self.sub_command.value == "set-owner":
self.sub_command_class = self.SetownerSubCommand()
else:
self.sub_command_class = None
class SetownerSubCommand(CommandWithParameters):
"""Defines an object for the dmg cont set-owner command."""
def __init__(self):
"""Create a dmg cont set-owner command object."""
super().__init__("/run/dmg/cont/set-owner/*", "set-owner")
self.pool = FormattedParameter("--pool={}", None)
self.cont = FormattedParameter("--cont={}", None)
self.user = FormattedParameter("--user={}", None)
self.group = FormattedParameter("--group={}", None)
class NetworkSubCommand(CommandWithSubCommand):
"""Defines an object for the dmg network sub command."""
def __init__(self):
"""Create a dmg network subcommand object."""
super().__init__("/run/dmg/network/*", "network")
def get_sub_command_class(self):
# pylint: disable=redefined-variable-type
"""Get the dmg network sub command object."""
if self.sub_command.value == "scan":
self.sub_command_class = self.ScanSubCommand()
else:
self.sub_command_class = None
class ScanSubCommand(CommandWithParameters):
"""Defines an object for the dmg network scan command."""
def __init__(self):
"""Create a dmg network scan command object."""
super().__init__("/run/dmg/network/scan/*", "scan")
self.provider = FormattedParameter("-p {}", None)
class PoolSubCommand(CommandWithSubCommand):
"""Defines an object for the dmg pool sub command."""
def __init__(self):
"""Create a dmg pool subcommand object."""
super().__init__("/run/dmg/pool/*", "pool")
def get_sub_command_class(self):
# pylint: disable=redefined-variable-type
"""Get the dmg pool sub command object."""
if self.sub_command.value == "create":
self.sub_command_class = self.CreateSubCommand()
elif self.sub_command.value == "delete-acl":
self.sub_command_class = self.DeleteAclSubCommand()
elif self.sub_command.value == "destroy":
self.sub_command_class = self.DestroySubCommand()
elif self.sub_command.value == "get-acl":
self.sub_command_class = self.GetAclSubCommand()
elif self.sub_command.value == "list":
self.sub_command_class = self.ListSubCommand()
elif self.sub_command.value == "overwrite-acl":
self.sub_command_class = self.OverwriteAclSubCommand()
elif self.sub_command.value == "query":
self.sub_command_class = self.QuerySubCommand()
elif self.sub_command.value == "set-prop":
self.sub_command_class = self.SetPropSubCommand()
elif self.sub_command.value == "update-acl":
self.sub_command_class = self.UpdateAclSubCommand()
elif self.sub_command.value == "exclude":
self.sub_command_class = self.ExcludeSubCommand()
elif self.sub_command.value == "extend":
self.sub_command_class = self.ExtendSubCommand()
elif self.sub_command.value == "drain":
self.sub_command_class = self.DrainSubCommand()
elif self.sub_command.value == "reintegrate":
self.sub_command_class = self.ReintegrateSubCommand()
elif self.sub_command.value == "evict":
self.sub_command_class = self.EvictSubCommand()
else:
self.sub_command_class = None
class CreateSubCommand(CommandWithParameters):
"""Defines an object for the dmg pool create command."""
def __init__(self):
"""Create a dmg pool create command object."""
super().__init__("/run/dmg/pool/create/*", "create")
self.group = FormattedParameter("--group={}", None)
self.user = FormattedParameter("--user={}", None)
self.acl_file = FormattedParameter("--acl-file={}", None)
self.size = FormattedParameter("--size={}", None)
self.tier_ratio = FormattedParameter("--tier-ratio={}", None)
self.scm_size = FormattedParameter("--scm-size={}", None)
self.nvme_size = FormattedParameter("--nvme-size={}", None)
self.ranks = FormattedParameter("--ranks={}", None)
self.nsvc = FormattedParameter("--nsvc={}", None)
self.sys = FormattedParameter("--sys={}", None)
self.properties = FormattedParameter("--properties={}", None)
self.label = FormattedParameter("--label={}", None)
self.nranks = FormattedParameter("--nranks={}", None)
class ExcludeSubCommand(CommandWithParameters):
"""Defines an object for the dmg pool exclude command."""
def __init__(self):
"""Create a dmg pool exclude command object."""
super().__init__("/run/dmg/pool/exclude/*", "exclude")
self.pool = FormattedParameter("{}", None)
self.rank = FormattedParameter("--rank={}", None)
self.tgt_idx = FormattedParameter("--target-idx={}", None)
class ExtendSubCommand(CommandWithParameters):
"""Defines an object for the dmg pool extend command."""
def __init__(self):
"""Create a dmg pool extend command object."""
super().__init__("/run/dmg/pool/extend/*", "extend")
self.pool = FormattedParameter("{}", None)
self.ranks = FormattedParameter("--ranks={}", None)
class DrainSubCommand(CommandWithParameters):
"""Defines an object for the dmg pool drain command."""
def __init__(self):
"""Create a dmg pool drain command object."""
super().__init__("/run/dmg/pool/drain/*", "drain")
self.pool = FormattedParameter("{}", None)
self.rank = FormattedParameter("--rank={}", None)
self.tgt_idx = FormattedParameter("--target-idx={}", None)
class ReintegrateSubCommand(CommandWithParameters):
"""Defines an object for dmg pool reintegrate command."""
def __init__(self):
"""Create a dmg pool reintegrate command object."""
super().__init__("/run/dmg/pool/reintegrate/*", "reintegrate")
self.pool = FormattedParameter("{}", None)
self.rank = FormattedParameter("--rank={}", None)
self.tgt_idx = FormattedParameter("--target-idx={}", None)
class DeleteAclSubCommand(CommandWithParameters):
"""Defines an object for the dmg pool delete-acl command."""
def __init__(self):
"""Create a dmg pool delete-acl command object."""
super().__init__("/run/dmg/pool/delete-acl/*", "delete-acl")
self.pool = FormattedParameter("{}", None)
self.principal = FormattedParameter("-p {}", None)
class DestroySubCommand(CommandWithParameters):
"""Defines an object for the dmg pool destroy command."""
def __init__(self):
"""Create a dmg pool destroy command object."""
super().__init__("/run/dmg/pool/destroy/*", "destroy")
self.pool = FormattedParameter("{}", None)
self.sys_name = FormattedParameter("--sys-name={}", None)
self.force = FormattedParameter("--force", False)
class GetAclSubCommand(CommandWithParameters):
"""Defines an object for the dmg pool get-acl command."""
def __init__(self):
"""Create a dmg pool get-acl command object."""
super().__init__("/run/dmg/pool/get-acl/*", "get-acl")
self.pool = FormattedParameter("{}", None)
class ListSubCommand(CommandWithParameters):
"""Defines an object for the dmg pool list command."""
def __init__(self):
"""Create a dmg pool list command object."""
super().__init__("/run/dmg/pool/list/*", "list")
self.no_query = FormattedParameter("--no-query", False)
self.verbose = FormattedParameter("--verbose", False)
class OverwriteAclSubCommand(CommandWithParameters):
"""Defines an object for the dmg pool overwrite-acl command."""
def __init__(self):
"""Create a dmg pool overwrite-acl command object."""
super().__init__(
"/run/dmg/pool/overwrite-acl/*", "overwrite-acl")
self.pool = FormattedParameter("{}", None)
self.acl_file = FormattedParameter("-a {}", None)
class QuerySubCommand(CommandWithParameters):
"""Defines an object for the dmg pool query command."""
def __init__(self):
"""Create a dmg pool query command object."""
super().__init__("/run/dmg/pool/query/*", "query")
self.pool = FormattedParameter("{}", None)
class SetPropSubCommand(CommandWithParameters):
"""Defines an object for the dmg pool set-prop command."""
def __init__(self):
"""Create a dmg pool set-prop command object."""
super().__init__("/run/dmg/pool/set-prop/*", "set-prop")
self.pool = FormattedParameter("{}", None)
self.name = FormattedParameter("--name={}", None)
self.value = FormattedParameter("--value={}", None)
class UpdateAclSubCommand(CommandWithParameters):
"""Defines an object for the dmg pool update-acl command."""
def __init__(self):
"""Create a dmg pool update-acl command object."""
super().__init__("/run/dmg/pool/update-acl/*", "update-acl")
self.pool = FormattedParameter("{}", None)
self.acl_file = FormattedParameter("-a {}", None)
self.entry = FormattedParameter("-e {}", None)
class EvictSubCommand(CommandWithParameters):
"""Defines an object for the dmg pool evict command."""
def __init__(self):
"""Create a dmg pool evict command object."""
super().__init__("/run/dmg/pool/evict/*", "evict")
self.pool = FormattedParameter("{}", None)
self.sys = FormattedParameter("--sys={}", None)
class StorageSubCommand(CommandWithSubCommand):
"""Defines an object for the dmg storage sub command."""
def __init__(self):
"""Create a dmg storage subcommand object."""
super().__init__("/run/dmg/storage/*", "storage")
def get_sub_command_class(self):
# pylint: disable=redefined-variable-type
"""Get the dmg storage sub command object."""
if self.sub_command.value == "format":
self.sub_command_class = self.FormatSubCommand()
elif self.sub_command.value == "query":
self.sub_command_class = self.QuerySubCommand()
elif self.sub_command.value == "scan":
self.sub_command_class = self.ScanSubCommand()
elif self.sub_command.value == "set":
self.sub_command_class = self.SetSubCommand()
else:
self.sub_command_class = None
class FormatSubCommand(CommandWithParameters):
"""Defines an object for the dmg storage format command."""
def __init__(self):
"""Create a dmg storage format command object."""
super().__init__("/run/dmg/storage/format/*", "format")
self.verbose = FormattedParameter("--verbose", False)
self.force = FormattedParameter("--force", False)
class QuerySubCommand(CommandWithSubCommand):
"""Defines an object for the dmg storage query command."""
def __init__(self):
"""Create a dmg storage query command object."""
super().__init__("/run/dmg/storage/query/*", "query")
def get_sub_command_class(self):
# pylint: disable=redefined-variable-type
"""Get the dmg storage query sub command object."""
if self.sub_command.value == "target-health":
self.sub_command_class = self.TargetHealthSubCommand()
elif self.sub_command.value == "device-health":
self.sub_command_class = self.DeviceHealthSubCommand()
elif self.sub_command.value == "list-devices":
self.sub_command_class = self.ListDevicesSubCommand()
elif self.sub_command.value == "list-pools":
self.sub_command_class = self.ListPoolsSubCommand()
else:
self.sub_command_class = None
class TargetHealthSubCommand(CommandWithParameters):
"""Defines a dmg storage query target-health object."""
def __init__(self):
"""Create a dmg storage query target-health object."""
super().__init__(
"/run/dmg/storage/query/target-health/*",
"target-health")
self.rank = FormattedParameter("-r {}", None)
self.tgtid = FormattedParameter("-t {}", None)
class DeviceHealthSubCommand(CommandWithParameters):
"""Defines a dmg storage query device-health object."""
def __init__(self):
"""Create a dmg storage query device-health object."""
super().__init__(
"/run/dmg/storage/query/device-health/*",
"device-health")
self.uuid = FormattedParameter("-u {}", None)
class ListDevicesSubCommand(CommandWithParameters):
"""Defines a dmg storage query list-devices object."""
def __init__(self):
"""Create a dmg storage query list-devices object."""
super().__init__(
"/run/dmg/storage/query/list-devices/*",
"list-devices")
self.rank = FormattedParameter("-r {}", None)
self.uuid = FormattedParameter("-u {}", None)
self.health = FormattedParameter("-b", False)
class ListPoolsSubCommand(CommandWithParameters):
"""Defines a dmg storage query list-pools object."""
def __init__(self):
"""Create a dmg storage query list-pools object."""
super().__init__(
"/run/dmg/storage/query/list-pools/*",
"list-pools")
self.rank = FormattedParameter("-r {}", None)
self.uuid = FormattedParameter("-u {}", None)
self.verbose = FormattedParameter("--verbose", False)
class ScanSubCommand(CommandWithParameters):
"""Defines an object for the dmg storage scan command."""
def __init__(self):
"""Create a dmg storage scan command object."""
super().__init__("/run/dmg/storage/scan/*", "scan")
self.nvme_health = FormattedParameter("--nvme-health", False)
self.verbose = FormattedParameter("--verbose", False)
class SetSubCommand(CommandWithSubCommand):
"""Defines an object for the dmg storage set command."""
def __init__(self):
"""Create a dmg storage set command object."""
super().__init__("/run/dmg/storage/set/*", "set")
def get_sub_command_class(self):
# pylint: disable=redefined-variable-type
"""Get the dmg set sub command object."""
if self.sub_command.value == "nvme-faulty":
self.sub_command_class = self.NvmeFaultySubCommand()
else:
self.sub_command_class = None
class NvmeFaultySubCommand(CommandWithParameters):
"""Defines a dmg storage set nvme-faulty object."""
def __init__(self):
"""Create a dmg storage set nvme-faulty object."""
super().__init__(
"/run/dmg/storage/query/device-state/*",
"nvme-faulty")
self.uuid = FormattedParameter("-u {}", None)
self.force = FormattedParameter("--force", False)
class SystemSubCommand(CommandWithSubCommand):
"""Defines an object for the dmg system sub command."""
def __init__(self):
"""Create a dmg system subcommand object."""
super().__init__("/run/dmg/system/*", "system")
def get_sub_command_class(self):
# pylint: disable=redefined-variable-type
"""Get the dmg system sub command object."""
if self.sub_command.value == "leader-query":
self.sub_command_class = self.LeaderQuerySubCommand()
elif self.sub_command.value == "list-pools":
self.sub_command_class = self.ListPoolsSubCommand()
elif self.sub_command.value == "query":
self.sub_command_class = self.QuerySubCommand()
elif self.sub_command.value == "start":
self.sub_command_class = self.StartSubCommand()
elif self.sub_command.value == "stop":
self.sub_command_class = self.StopSubCommand()
elif self.sub_command.value == "erase":
self.sub_command_class = self.EraseSubCommand()
elif self.sub_command.value == "cleanup":
self.sub_command_class = self.CleanupSubCommand()
else:
self.sub_command_class = None
class LeaderQuerySubCommand(CommandWithParameters):
"""Defines an object for the dmg system leader-query command."""
def __init__(self):
"""Create a dmg system leader-query command object."""
super().__init__(
"/run/dmg/system/leader-query/*", "leader-query")
class ListPoolsSubCommand(CommandWithParameters):
"""Defines an object for the dmg system list-pools command."""
def __init__(self):
"""Create a dmg system list-pools command object."""
super().__init__("/run/dmg/system/list-pools/*", "list-pools")
class QuerySubCommand(CommandWithParameters):
"""Defines an object for the dmg system query command."""
def __init__(self):
"""Create a dmg system query command object."""
super().__init__("/run/dmg/system/query/*", "query")
self.ranks = FormattedParameter("--ranks={}")
self.verbose = FormattedParameter("--verbose", False)
class CleanupSubCommand(CommandWithParameters):
"""Defines an object for the dmg system cleanup command."""
def __init__(self):
"""Create a dmg system cleanup command object."""
super().__init__("/run/dmg/system/cleanup/*", "cleanup")
self.machinename = FormattedParameter("{}", None)
self.verbose = FormattedParameter("--verbose", False)
class StartSubCommand(CommandWithParameters):
"""Defines an object for the dmg system start command."""
def __init__(self):
"""Create a dmg system start command object."""
super().__init__("/run/dmg/system/start/*", "start")
self.ranks = FormattedParameter("--ranks={}")
self.rank_hosts = FormattedParameter("--rank-hosts={}")
class StopSubCommand(CommandWithParameters):
"""Defines an object for the dmg system stop command."""
def __init__(self):
"""Create a dmg system stop command object."""
super().__init__("/run/dmg/system/stop/*", "stop")
self.force = FormattedParameter("--force", False)
self.ranks = FormattedParameter("--ranks={}")
class EraseSubCommand(CommandWithParameters):
"""Defines an object for the dmg system erase command."""
def __init__(self):
"""Create a dmg system erase command object."""
super().__init__(
"/run/dmg/system/erase/*", "erase")
class TelemetrySubCommand(CommandWithSubCommand):
"""Defines an object for the dmg telemetry sub command."""
def __init__(self):
"""Create a dmg telemetry subcommand object."""
super().__init__("/run/dmg/telemetry/*", "telemetry")
def get_sub_command_class(self):
# pylint: disable=redefined-variable-type
"""Get the dmg telemetry sub command object."""
if self.sub_command.value == "metrics":
self.sub_command_class = self.MetricsSubCommand()
else:
self.sub_command_class = None
class MetricsSubCommand(CommandWithSubCommand):
"""Defines an object for the dmg telemetry metrics command."""
def __init__(self):
"""Create a dmg telemetry metrics command object."""
super().__init__("/run/dmg/telemetry/metrics/*", "metrics")
def get_sub_command_class(self):
# pylint: disable=redefined-variable-type
"""Get the dmg telemetry metrics sub command object."""
if self.sub_command.value == "list":
self.sub_command_class = self.ListSubCommand()
elif self.sub_command.value == "query":
self.sub_command_class = self.QuerySubCommand()
else:
self.sub_command_class = None
class ListSubCommand(CommandWithParameters):
"""Defines a dmg telemetry metrics list object."""
def __init__(self):
"""Create a dmg telemetry metrics list object."""
super().__init__(
"/run/dmg/telemetry/metrics/list/*", "list")
self.host = FormattedParameter("--host-list={}", None)
self.port = FormattedParameter("--port={}", None)
class QuerySubCommand(CommandWithParameters):
"""Defines a dmg telemetry metrics query object."""
def __init__(self):
"""Create a dmg telemetry metrics query object."""
super().__init__(
"/run/dmg/telemetry/metrics/query/*", "query")
self.host = FormattedParameter("--host-list={}", None)
self.port = FormattedParameter("--port={}", None)
self.metrics = FormattedParameter("--metrics={}", None)
class VersionSubCommand(CommandWithSubCommand):
"""Defines an object for the dmg version sub command."""
def __init__(self):
"""Create a dmg version subcommand object."""
            super().__init__("/run/dmg/version/*", "version")
| 46.127962
| 80
| 0.568924
|
0fee08732c5ecdc0064f501763a1486fbf9963ea
| 1,170
|
py
|
Python
|
xidplus/numpyro_fit/misc.py
|
MCarmenCampos/XID_plus
|
c031366b48486d229ac96d4eb4f547faf5227c25
|
[
"MIT"
] | null | null | null |
xidplus/numpyro_fit/misc.py
|
MCarmenCampos/XID_plus
|
c031366b48486d229ac96d4eb4f547faf5227c25
|
[
"MIT"
] | null | null | null |
xidplus/numpyro_fit/misc.py
|
MCarmenCampos/XID_plus
|
c031366b48486d229ac96d4eb4f547faf5227c25
|
[
"MIT"
] | null | null | null |
import jax
import pickle
import functools
from xidplus.numpyro_fit.neuralnet_models import CIGALE_emulator, CIGALE_emulator_kasia
import numpy as np
@functools.partial(jax.jit, static_argnums=(2,))
def sp_matmul(A, B, shape):
"""
http://gcucurull.github.io/deep-learning/2020/06/03/jax-sparse-matrix-multiplication/
Arguments:
A: (N, M) sparse matrix represented as a tuple (indexes, values)
B: (M,K) dense matrix
shape: value of N
Returns:
(N, K) dense matrix
"""
#assert B.ndim == 2
indexes, values = A
rows, cols = indexes
in_ = B.take(cols, axis=-2)
prod = in_*values[:, None]
res = jax.ops.segment_sum(prod, rows, shape)
return res
def load_emulator(filename):
#read in net params
x=np.load(filename, allow_pickle=True)
net_init,net_apply=CIGALE_emulator()
return {'net_init':net_init,'net_apply':net_apply,'params':x['arr_0'].tolist()}
def load_emulatorII(filename):
#read in net params
x=np.load(filename, allow_pickle=True)
net_init,net_apply=CIGALE_emulator_kasia()
return {'net_init':net_init,'net_apply':net_apply,'params':x['arr_0'].tolist()}
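
if __name__ == "__main__":
    # Hedged demo (toy values, not from the original module): multiply a 3x3
    # sparse matrix, given as (indexes, values), by a dense 3x2 matrix.
    import jax.numpy as jnp
    rows = jnp.array([0, 1, 2])
    cols = jnp.array([2, 0, 1])
    vals = jnp.array([1.0, 2.0, 3.0])
    dense = jnp.ones((3, 2))
    out = sp_matmul(((rows, cols), vals), dense, 3)
    print(out.shape)  # -> (3, 2)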
| 30.789474
| 89
| 0.688034
|
23d5d455f36f59d5f4aa798163329fbe27f7a6e5
| 13,299
|
py
|
Python
|
edr_explorer/interface.py
|
informatics-lab/edr_explorer
|
80b0572e73569e5106d0a80ca44cc28c22f15974
|
[
"BSD-3-Clause"
] | null | null | null |
edr_explorer/interface.py
|
informatics-lab/edr_explorer
|
80b0572e73569e5106d0a80ca44cc28c22f15974
|
[
"BSD-3-Clause"
] | 31
|
2021-11-12T16:54:05.000Z
|
2022-03-25T14:10:07.000Z
|
edr_explorer/interface.py
|
informatics-lab/edr_explorer
|
80b0572e73569e5106d0a80ca44cc28c22f15974
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from .data import DataHandler
from .lookup import CRS_LOOKUP, TRS_LOOKUP
from .util import dict_list_search, get_request, ISO8601Expander
class EDRInterface(object):
"""
An interface to an EDR Server that can navigate the query structure to
return data payloads in CoverageJSON format from the EDR Server.
"""
_collections_query_str = "collections/?f=json"
_query_str = "collections/{coll_id}/{query_type}?{query_str}"
def __init__(self, server_host):
"""
Construct an interface to an EDR Server accessible at the URI specified in `server_host`
and request the `collections` metadata from the server.
"""
self.server_host = server_host
self._errors = None
self._data_handler = None
self.json = self._get_covjson(self._collections_query_str)
self.collections = self._get_collections()
self.collection_ids = self._get_collection_ids()
self.collection_titles = self._get_collection_titles()
@property
def errors(self):
return self._errors
@errors.setter
def errors(self, value):
self._errors = value
@property
def data_handler(self):
return self._data_handler
@data_handler.setter
def data_handler(self, value):
self._data_handler = value
def __repr__(self):
n_colls = len(self.collections)
max_id_len = max([len(c_id) for c_id in self.collection_ids])
str_result = f"EDR Interface to {n_colls} collection{'s' if n_colls>1 else ''}:\n"
header = f" # {'ID'.ljust(max_id_len, ' ')} Title\n"
str_result += header
line = " {i} {c_id} {c_title}\n"
for i in range(len(self.collection_ids)):
c_id_str = self.collection_ids[i]
c_id_block = c_id_str.ljust(max_id_len-len(c_id_str)+2, ' ')
str_result += line.format(i=i, c_id=c_id_block, c_title=self.collection_titles[i])
return str_result
def _get_covjson(self, query_str, full_uri=False):
"""Make a request to the EDR Server and return the (coverage) JSON response."""
self.errors = None
if full_uri:
uri = query_str
else:
uri = f"{self.server_host}/{query_str}"
result, status_code, errors = get_request(uri)
if errors is not None:
emsg = errors
if status_code is not None:
emsg += f" ({status_code})"
self.errors = emsg
return result
def _get_collections(self):
return self.json["collections"] if self.json is not None else None
def _get_collection_ids(self):
return [c["id"] for c in self.collections] if self.json is not None else None
def _get_collection_titles(self):
return [c["title"] for c in self.collections] if self.json is not None else None
def _get_link(self, coll_id, key, query_ref):
"""
Retrieve a link url embedded in collection metadata.
Typically these links describe how to retrieve specific data from the server.
"""
coll = self.get_collection(coll_id)
if key == "links":
links = [itm["href"] for itm in coll[key]]
if isinstance(query_ref, int):
link = links[query_ref]
elif isinstance(query_ref, str):
                link = next(filter(lambda l: query_ref in l, links))
else:
raise KeyError(f"Invalid link reference: {query_ref!r} (type {type(query_ref)}.)")
elif key == "data_queries":
link = coll[key][query_ref]["link"]["href"]
else:
raise ValueError(f"Cannot extract links from collection key {key!r}.")
return link
def _get_locations_json(self, keys):
"""Get JSON data from the server from a `locations` query."""
coll = self.get_collection(keys)
locs_query_uri = self._get_link(keys, "data_queries", "locations")
named_query_uri = locs_query_uri.replace("name", coll["id"])
return self._get_covjson(named_query_uri, full_uri=True)
def _handle_label(self, label_item):
"""
Labels in EDR can either be provided directly, or in a dict with one or more
locales; respectively:
* "label": "my_label", or
* "label": {"en": "my_label", ...}
This helper handles either provision, and returns the locale-specific
label, if provided by the server.
"""
locale = "en" # This could be set globally in future.
try:
label = label_item.get(locale)
except AttributeError:
label = label_item
return label
def get_collection(self, keys):
"""
Get the JSON metadata of a specific collection in the list of collections
provided by the EDR Server and listed in the response from the `collections`
query. The specific collection is selected by the value of `keys`, which may
be one of the following:
* an int describing the index of the collection in the list of collections
* a string containing the value of the `id` parameter of a collection
* a string containing the value of the `title` parameter of a collection
"""
idx = None
if isinstance(keys, int):
idx = keys
# XXX this could be replaced with `dict_list_search`.
else:
for i, coll in enumerate(self.collections):
coll_keys = [coll["id"], coll["title"]]
if keys in coll_keys:
idx = i
if idx is None:
emsg = f"Collection {keys!r} could not be found."
raise KeyError(emsg)
return self.collections[idx]
def get_locations(self, keys):
"""
Make a `locations` request to the EDR Server and return a list of IDs of defined
locations in the collection defined by `keys`.
"""
result = [None]
if "locations" in self.get_query_types(keys):
locs_json = self._get_locations_json(keys)
result = [d["id"] for d in locs_json["features"]]
return result
def get_location_extents(self, keys, feature_id):
"""
Make a `locations` request to the EDR Server and return the bounding-box
geometry of a specific location defined by:
* the collection specified by `keys`
* the location specified by `feature_id`.
"""
locs_json = self._get_locations_json(keys)
feature_json = dict_list_search(locs_json["features"], "id", feature_id)
return feature_json["geometry"]
def get_spatial_extent(self, keys):
"""
Return the spatial (bounding-box) extent of the collection defined by `keys`.
"""
coll = self.get_collection(keys)
return coll["extent"]["spatial"]["bbox"]
def has_temporal_extent(self, keys):
"""Determine whether a collection described by `keys` has a temporal extent section."""
coll = self.get_collection(keys)
extent = coll["extent"].get("temporal")
return extent is not None
def get_temporal_extent(self, keys):
"""
Return the time coordinate points for the collection defined by `keys`.
"""
coll = self.get_collection(keys)
times = coll["extent"]["temporal"]
try:
t_values = times["values"]
except KeyError:
t_values = times["interval"]
datetime_strings = []
for value in t_values:
datetime_gen = ISO8601Expander(value)
datetime_strings.extend(datetime_gen.datetime_strings)
return datetime_strings
def has_vertical_extent(self, keys):
"""Determine whether a collection described by `keys` has a vertical extent section."""
coll = self.get_collection(keys)
extent = coll["extent"].get("vertical")
return extent is not None
def get_vertical_extent(self, keys):
"""
Return the vertical coordinate points for the collection defined by `keys`.
"""
coll = self.get_collection(keys)
vertical = coll["extent"]["vertical"]
z_desc_keys = ["interval", "values"]
return vertical[z_desc_keys[1]] # Presume we'll have explicit values.
def get_query_types(self, keys):
"""Return a list of the query types supported against a collection defined by `keys`."""
coll = self.get_collection(keys)
return list(coll['data_queries'].keys())
def get_collection_parameters(self, keys):
"""
Get descriptions of the datasets (that is, environmental quantities / parameters / phenomena)
provided by a collection defined by `keys`.
"""
coll = self.get_collection(keys)
params_dict = {}
for param_id, param_desc in coll["parameter_names"].items():
label_provider = param_desc["observedProperty"]["label"]
label = self._handle_label(label_provider)
units = param_desc["unit"]["symbol"]["value"]
params_dict[param_id] = {"label": label, "units": units}
return params_dict
def query_position(self):
"""
Request data values and coordinate point arrays for a specific dataset provided
by the EDR Server. A `position` request is one of the specifically defined query
types in EDR, along with `location` and `items`, and is used for returning data
at a particular location.
"""
        raise NotImplementedError
def query_items(self):
"""
        Request predefined data objects from the EDR Server. An `items` request is one of the
specifically defined query types in EDR, along with `position` and `location`.
It is used for returning whole dataset objects, such as NetCDF files.
"""
raise NotImplementedError
def _query(self, coll_id, query_type, param_names=None, **query_kwargs):
"""Run a query to return data from the EDR Server."""
coll = self.get_collection(coll_id)
# Set up the dict to format the query string based on query type.
format_dict = dict(coll_id=coll["id"])
if query_type == "locations":
loc_id = query_kwargs.pop("loc_id")
format_dict["query_type"] = f"locations/{loc_id}"
else:
format_dict["query_type"] = query_type
# Construct the query string for the request.
parameter_str = "{key}={value}"
query_items = [parameter_str.format(key=k, value=v) for k, v in query_kwargs.items()]
if param_names is not None:
if not isinstance(param_names, str):
param_names = ",".join(param_names)
query_items.append(parameter_str.format(key="parameter-name", value=param_names))
query_str = "&".join(query_items)
format_dict["query_str"] = query_str
# Make the request and set up the data handler from the response.
query_uri = self._query_str.format(**format_dict)
print(query_uri)
data_json = self._get_covjson(query_uri)
self.data_handler = DataHandler(data_json)
def query(self, coll_id, query_type, param_names=None, **query_kwargs):
"""
Define a generic query to submit to the EDR Server.
Args and kwargs:
* `coll_id` is an identifier for a collection
* `query_type` is a valid query type to submit to the EDR Server. This can be one
of `radius`, `area`, `cube`, `trajectory`, `corridor`, `position`, `locations`, `items`;
note that not all query types are guaranteed to be supported by the EDR Server.
An `AssertionError` will be raised if the query type is not supported by the EDR Server.
If `query_type` is set to `locations`, a location ID **must** be specified in
the query kwargs using the key `loc_id`.
* `param_names`: names of parameters, available in the collection defined by `coll_id`,
to return data for.
* `query_kwargs`: parameters to construct the parameter string of the query.
Valid parameters vary between query types; check the EDR documentation for more
information. Common parameter **keys** include `coords`, `parameter-name`, `z`, `datetime`,
`crs` and `f` (for return type of the result from the EDR Server). **Values** _must_ be
appropriately formatted strings.
Note: it is up to the calling scope to ensure that valid query kwargs are
passed. No parameter validation is performed here; a query will be constructed
and submitted to the EDR Server without further checks.
"""
self.data_handler = None # Reset the `data_handler` attribute.
# Confirm the EDR Server can handle the sort of query requested.
available_query_types = self.get_query_types(coll_id)
if query_type in available_query_types:
self._query(coll_id, query_type, param_names=param_names, **query_kwargs)
else:
self.errors = f"Query type {query_type!r} not supported by server."
| 40.057229
| 103
| 0.629295
|
59ab9d70d8fa277834b377071381b3c6387ccd67
| 2,854
|
py
|
Python
|
seed/seed.py
|
julianadecarvalho/HearTrans-front-end
|
b72b0d3337fca30276102b73c5dee1e1f1342db1
|
[
"MIT"
] | 1
|
2022-01-24T09:34:41.000Z
|
2022-01-24T09:34:41.000Z
|
seed/seed.py
|
julianadecarvalho/HearTrans-front-end
|
b72b0d3337fca30276102b73c5dee1e1f1342db1
|
[
"MIT"
] | null | null | null |
seed/seed.py
|
julianadecarvalho/HearTrans-front-end
|
b72b0d3337fca30276102b73c5dee1e1f1342db1
|
[
"MIT"
] | 1
|
2021-08-10T16:30:04.000Z
|
2021-08-10T16:30:04.000Z
|
# # import requests
# # import json
# # url = "http://heartrans-back.herokuapp.com/providers/"
# # payload = json.dumps({
# # "fullName": "Sydney Woolston",
# # "otherNames": [
# # "Sydney Woolston"
# # ],
# # "titles": [
# # "License Mental Health Counselor Associate"
# # ],
# # "specialties": ["Psychology"],
# # "services": [
# # "Couples/Relationship therapy",
# # "Individual therapy"
# # ],
# # "languages": ["English"],
# # "remoteVisits": True,
# # "slidingScalePay": False
# # })
# # headers = {
# # 'Content-Type': 'application/json'
# # }
# # response = requests.request("POST", url, headers=headers, data=payload)
# # print(response.text)
# import requests
# import json
# # provider request bodies
# url = "http://heartrans-back.herokuapp.com/providers/"
# payload = json.dumps({
# "fullName": "Chester Robachinski",
# "otherNames": [
# "Chet Robachinski"
# ],
# "titles": [
# "MD"
# ],
# "specialties": "Psychiatry",
# "services": [
# "Couples/Relationship therapy",
# "Individual therapy",
# "Medication management"
# ],
# "languages": ["English"],
# "remoteVisits": True,
# "slidingScalePay": False
# })
# headers = {
# 'Content-Type': 'application/json'
# }
# response = requests.request("POST", url, headers=headers, data=payload)
# print(response.text)
# url = "http://heartrans-back.herokuapp.com/providers/"
# payload = json.dumps({
# "fullName": "Sydney Woolston",
# "otherNames": [
# "Sydney Woolston"
# ],
# "titles": [
# "MD"
# ],
# "specialties": ["Psychology"],
# "services": [
# "Couples/Relationship therapy",
# "Individual therapy",
# ],
# "languages": ["English"],
# "remoteVisits": True,
# "slidingScalePay": False
# })
# headers = {
# 'Content-Type': 'application/json'
# }
# response = requests.request("POST", url, headers=headers, data=payload)
# print(response.text)
# url = "http://heartrans-back.herokuapp.com/providers/"
# payload = json.dumps({
# "fullName": "Rebecca Virata",
# "otherNames": [
# "Rebecca Virata"
# ],
# "titles": [
# "MD"
# ],
# "specialties": ["Primary Care"],
# "services": [
# "COVID-19 Testing"
# ],
# "languages": ["English"],
# "remoteVisits": True,
# "slidingScalePay": False
# })
# headers = {
# 'Content-Type': 'application/json'
# }
# response = requests.request("POST", url, headers=headers, data=payload)
# print(response.text)
# # location request bodies
# url = "http://heartrans-back.herokuapp.com/locations/new/"
#
# payload={}
# headers = {}
#
# response = requests.request("POST", url, headers=headers, data=payload)
#
# print(response.text)
| 25.256637
| 75
| 0.564121
|
1789495f361718add91af2ef1aecdbbec90f4e80
| 1,260
|
py
|
Python
|
tools/gcp-org-hierarchy-viewer/setup.py
|
ruchirjain86/professional-services
|
739ac0f5ffc8237f750804fa9f0f14d4d918a0fa
|
[
"Apache-2.0"
] | 24
|
2019-03-05T19:23:23.000Z
|
2022-01-24T18:21:33.000Z
|
tools/gcp-org-hierarchy-viewer/setup.py
|
ruchirjain86/professional-services
|
739ac0f5ffc8237f750804fa9f0f14d4d918a0fa
|
[
"Apache-2.0"
] | 6
|
2020-01-28T22:42:21.000Z
|
2021-10-30T05:50:15.000Z
|
tools/gcp-org-hierarchy-viewer/setup.py
|
ruchirjain86/professional-services
|
739ac0f5ffc8237f750804fa9f0f14d4d918a0fa
|
[
"Apache-2.0"
] | 13
|
2019-10-27T18:49:32.000Z
|
2022-03-06T00:02:21.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
setup(name='gcp-org-hierarchy-viewer',
version='1.0',
license='Apache v2.0',
author='Paul Durivage',
author_email='durivage@google.com',
description='Tool to visualize a Google Cloud organization hierarchy on '
'the command line',
scripts=['gcpohv_cli.py'],
entry_points={'console_scripts': ['gcpohv=gcpohv_cli:main']},
install_requires=[
'google-api-python-client>=1.7.8',
'asciitree==0.3.3',
],
classifiers=[
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.5',
])
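
# Hedged usage note (not part of the original setup script): installing the
# package, e.g. with `pip install .`, exposes the console entry point defined
# above, so the tool can then be run simply as `gcpohv`.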
| 36
| 79
| 0.674603
|
5a6e877b423eff9a936242404288c3f901894adb
| 8,936
|
py
|
Python
|
kmip/tests/unit/core/factories/payloads/test_response.py
|
ondrap/PyKMIP
|
c8ea17d8faf827e0f9d004972835128a1a71569f
|
[
"Apache-2.0"
] | 179
|
2015-03-20T06:08:59.000Z
|
2022-03-14T02:24:38.000Z
|
kmip/tests/unit/core/factories/payloads/test_response.py
|
imharshr/PyKMIP
|
9403ff3d2aa83de4c786b8eedeb85d169fd4a594
|
[
"Apache-2.0"
] | 600
|
2015-04-08T14:14:48.000Z
|
2022-03-28T13:49:47.000Z
|
kmip/tests/unit/core/factories/payloads/test_response.py
|
imharshr/PyKMIP
|
9403ff3d2aa83de4c786b8eedeb85d169fd4a594
|
[
"Apache-2.0"
] | 131
|
2015-03-30T12:51:49.000Z
|
2022-03-23T04:34:34.000Z
|
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from kmip.core import enums
from kmip.core.factories.payloads.response import ResponsePayloadFactory
from kmip.core.messages import payloads
class TestResponsePayloadFactory(testtools.TestCase):
def setUp(self):
super(TestResponsePayloadFactory, self).setUp()
self.factory = ResponsePayloadFactory()
def tearDown(self):
super(TestResponsePayloadFactory, self).tearDown()
def _test_not_implemented(self, func, args):
self.assertRaises(NotImplementedError, func, args)
def _test_payload_type(self, payload, payload_type):
msg = "expected {0}, received {1}".format(payload_type, payload)
self.assertIsInstance(payload, payload_type, msg)
def test_create_create_payload(self):
payload = self.factory.create(enums.Operation.CREATE)
self._test_payload_type(payload, payloads.CreateResponsePayload)
def test_create_create_key_pair_payload(self):
payload = self.factory.create(enums.Operation.CREATE_KEY_PAIR)
self._test_payload_type(
payload,
payloads.CreateKeyPairResponsePayload
)
def test_create_register_payload(self):
payload = self.factory.create(enums.Operation.REGISTER)
self._test_payload_type(payload, payloads.RegisterResponsePayload)
def test_create_rekey_payload(self):
payload = self.factory.create(enums.Operation.REKEY)
self._test_payload_type(payload, payloads.RekeyResponsePayload)
def test_create_derive_key_payload(self):
payload = self.factory.create(enums.Operation.DERIVE_KEY)
self._test_payload_type(payload, payloads.DeriveKeyResponsePayload)
def test_create_certify_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.CERTIFY
)
def test_create_recertify_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.RECERTIFY
)
def test_create_locate_payload(self):
payload = self.factory.create(enums.Operation.LOCATE)
self._test_payload_type(payload, payloads.LocateResponsePayload)
def test_create_check_payload(self):
payload = self.factory.create(enums.Operation.CHECK)
self._test_payload_type(payload, payloads.CheckResponsePayload)
def test_create_get_payload(self):
payload = self.factory.create(enums.Operation.GET)
self._test_payload_type(payload, payloads.GetResponsePayload)
def test_create_get_attributes_payload(self):
payload = self.factory.create(enums.Operation.GET_ATTRIBUTES)
self._test_payload_type(
payload,
payloads.GetAttributesResponsePayload
)
def test_create_get_attributes_list_payload(self):
payload = self.factory.create(enums.Operation.GET_ATTRIBUTE_LIST)
self._test_payload_type(
payload,
payloads.GetAttributeListResponsePayload
)
def test_create_add_attribute_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.ADD_ATTRIBUTE
)
def test_create_modify_attribute_payload(self):
payload = self.factory.create(enums.Operation.MODIFY_ATTRIBUTE)
self.assertIsInstance(payload, payloads.ModifyAttributeResponsePayload)
def test_create_delete_attribute_payload(self):
payload = self.factory.create(enums.Operation.DELETE_ATTRIBUTE)
self.assertIsInstance(payload, payloads.DeleteAttributeResponsePayload)
def test_create_set_attribute_payload(self):
payload = self.factory.create(enums.Operation.SET_ATTRIBUTE)
self.assertIsInstance(payload, payloads.SetAttributeResponsePayload)
def test_create_obtain_lease_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.OBTAIN_LEASE
)
def test_create_get_usage_allocation_payload(self):
self._test_not_implemented(
self.factory.create, enums.Operation.GET_USAGE_ALLOCATION)
def test_create_activate_payload(self):
payload = self.factory.create(enums.Operation.ACTIVATE)
self._test_payload_type(payload, payloads.ActivateResponsePayload)
def test_create_revoke_payload(self):
payload = self.factory.create(enums.Operation.REVOKE)
self._test_payload_type(payload, payloads.RevokeResponsePayload)
def test_create_destroy_payload(self):
payload = self.factory.create(enums.Operation.DESTROY)
self._test_payload_type(payload, payloads.DestroyResponsePayload)
def test_create_archive_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.ARCHIVE
)
def test_create_recover_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.RECOVER
)
def test_create_validate_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.VALIDATE
)
def test_create_query_payload(self):
payload = self.factory.create(enums.Operation.QUERY)
self._test_payload_type(payload, payloads.QueryResponsePayload)
def test_create_cancel_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.CANCEL
)
def test_create_poll_payload(self):
self._test_not_implemented(self.factory.create, enums.Operation.POLL)
def test_create_notify_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.NOTIFY
)
def test_create_put_payload(self):
self._test_not_implemented(self.factory.create, enums.Operation.PUT)
def test_create_rekey_key_pair_payload(self):
payload = self.factory.create(enums.Operation.REKEY_KEY_PAIR)
self._test_payload_type(
payload,
payloads.RekeyKeyPairResponsePayload
)
def test_create_discover_versions_payload(self):
payload = self.factory.create(enums.Operation.DISCOVER_VERSIONS)
self._test_payload_type(
payload,
payloads.DiscoverVersionsResponsePayload
)
def test_create_encrypt_payload(self):
payload = self.factory.create(enums.Operation.ENCRYPT)
self._test_payload_type(payload, payloads.EncryptResponsePayload)
def test_create_decrypt_payload(self):
payload = self.factory.create(enums.Operation.DECRYPT)
self._test_payload_type(payload, payloads.DecryptResponsePayload)
def test_create_sign_payload(self):
payload = self.factory.create(enums.Operation.SIGN)
self._test_payload_type(payload, payloads.SignResponsePayload)
def test_create_signature_verify_payload(self):
payload = self.factory.create(enums.Operation.SIGNATURE_VERIFY)
self._test_payload_type(
payload,
payloads.SignatureVerifyResponsePayload
)
def test_create_mac_payload(self):
payload = self.factory.create(enums.Operation.MAC)
self._test_payload_type(
payload,
payloads.MACResponsePayload
)
def test_create_mac_verify_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.MAC_VERIFY
)
def test_create_rng_retrieve_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.RNG_RETRIEVE
)
def test_create_rng_seed_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.RNG_SEED
)
def test_create_hash_payload(self):
self._test_not_implemented(self.factory.create, enums.Operation.HASH)
def test_create_create_split_key_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.CREATE_SPLIT_KEY
)
def test_create_join_split_key_payload(self):
self._test_not_implemented(
self.factory.create,
enums.Operation.JOIN_SPLIT_KEY
)
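
if __name__ == '__main__':
    # Hedged demo (not part of the original test module): the factory maps an
    # Operation enum member onto the matching response payload class, which is
    # what the tests above assert.
    factory = ResponsePayloadFactory()
    payload = factory.create(enums.Operation.GET)
    print(type(payload).__name__)  # -> GetResponsePayload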
| 35.320158
| 79
| 0.709266
|
67495efd734724bf6dc756a782ea39d10a510407
| 4,192
|
py
|
Python
|
UServer/http_api_no_auth/api/api_msg.py
|
soybean217/lora-python
|
9c4324f81bae8b20f6c353447189f724a5cf54c6
|
[
"MIT"
] | null | null | null |
UServer/http_api_no_auth/api/api_msg.py
|
soybean217/lora-python
|
9c4324f81bae8b20f6c353447189f724a5cf54c6
|
[
"MIT"
] | null | null | null |
UServer/http_api_no_auth/api/api_msg.py
|
soybean217/lora-python
|
9c4324f81bae8b20f6c353447189f724a5cf54c6
|
[
"MIT"
] | null | null | null |
import json
from binascii import hexlify
from flask import Response, request
from userver.object.device import Device
from userver.object.group import Group
from .decorators import msg_filter_valid
from userver.object.application import Application
from ..http_auth import auth
from userver.object.message import MsgUp, MsgDn
from . import api, root
@api.route(root+'msg-up', methods=['GET'])
@auth.auth_required
@msg_filter_valid
def msg_up(user, app=None, device=None, group=None, start_ts=0, end_ts=-1):
if request.method == 'GET':
if group is not None:
return Response(status=404, response=json.dumps('Group has no message up.'))
elif device is None and app is not None:
msg_all = []
msgs = MsgUp.objects.all(app.app_eui, start_ts=start_ts, end_ts=end_ts, cur_cnt=250)
msg_all.append({'app': hexlify(app.app_eui).decode(), 'message_up': [msg.obj_to_dict() for msg in msgs]})
msg_up_json = json.dumps(msg_all)
elif app is not None and device is not None:
msgs = MsgUp.objects.all(app.app_eui, device.dev_eui, start_ts=start_ts, end_ts=end_ts, cur_cnt=250)
msg_up_json = json.dumps([msg.obj_to_dict() for msg in msgs])
else:
apps = Application.query.filter_by(user_id=user.id)
msg_all = []
for app in apps:
msgs = MsgUp.objects.all(app.app_eui, start_ts=start_ts, end_ts=end_ts, cur_cnt=250)
msg_all.append({'app': hexlify(app.app_eui).decode(), 'message_up': [msg.obj_to_dict() for msg in msgs]})
msg_up_json = json.dumps(msg_all)
return Response(status=200, response=msg_up_json)
@api.route(root+'msg-down', methods=['GET'])
@auth.auth_required
@msg_filter_valid
def msg_down(user, app=None, group=None, device=None, start_ts=0, end_ts=-1):
if request.method == 'GET':
if device is None and group is None and app is not None:
msg_list = []
devices = Device.objects.all(app.app_eui)
groups = Group.objects.all(app.app_eui)
for device in devices:
msgs = MsgDn.objects.all(type='DEV', eui=device.dev_eui, start_ts=start_ts, end_ts=end_ts)
for msg in msgs:
msg_list.append(msg.obj_to_dict())
for group in groups:
msgs = MsgDn.objects.all(type='GROUP', eui=group.id, start_ts=start_ts, end_ts=end_ts)
for msg in msgs:
msg_list.append(msg.obj_to_dict())
return Response(status=200, response=json.dumps(msg_list))
elif app is None and group is None and device is None:
apps = Application.query.filter_by(user_id=user.id)
app_list = []
for app in apps:
msg_list = []
devices = Device.objects.all(app.app_eui)
groups = Group.objects.all(app.app_eui)
for device in devices:
msgs = MsgDn.objects.all(type='DEV', eui=device.dev_eui, start_ts=start_ts, end_ts=end_ts)
for msg in msgs:
msg_list.append(msg.obj_to_dict())
for group in groups:
msgs = MsgDn.objects.all(type='GROUP', eui=group.id, start_ts=start_ts, end_ts=end_ts)
for msg in msgs:
msg_list.append(msg.obj_to_dict())
app_list.append({'app': hexlify(app.app_eui).decode(),
'message_down': msg_list})
return Response(status=200, response=json.dumps(app_list))
else:
msg_list = []
if device is not None:
msgs = MsgDn.objects.all(type='DEV', eui=device.dev_eui, start_ts=start_ts, end_ts=end_ts)
for msg in msgs:
msg_list.append(msg.obj_to_dict())
if group is not None:
msgs = MsgDn.objects.all(type='GROUP', eui=group.id, start_ts=start_ts, end_ts=end_ts)
for msg in msgs:
msg_list.append(msg.obj_to_dict())
return Response(status=200, response=json.dumps(msg_list))
| 48.744186
| 121
| 0.605439
|
3e42ce74f923bbc56ab82a808b31b619030b0971
| 633
|
py
|
Python
|
oops_fhir/r4/value_set/push_type_available.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/push_type_available.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/push_type_available.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.push_type_available import (
pushtypeavailable as pushtypeavailable_,
)
__all__ = ["pushtypeavailable"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class pushtypeavailable(pushtypeavailable_):
"""
Push-type-available
Type of alerts/updates the primary source can send
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/verificationresult-push-type-available
"""
class Meta:
resource = _resource
| 20.419355
| 71
| 0.747235
|
cd42264e36d415ac5f4a3d2630edb7684ce1eca6
| 4,120
|
py
|
Python
|
venv/Lib/site-packages/falcon/cmd/print_routes.py
|
RafaelHMachado/Cioffis_Automation
|
07965ca71c3d4e78f5cee1fce4ba0bbfe2db9811
|
[
"MIT"
] | 2
|
2020-12-09T17:26:25.000Z
|
2021-05-07T02:21:57.000Z
|
venv/Lib/site-packages/falcon/cmd/print_routes.py
|
RafaelHMachado/Cioffis_Automation
|
07965ca71c3d4e78f5cee1fce4ba0bbfe2db9811
|
[
"MIT"
] | 5
|
2021-06-29T18:34:13.000Z
|
2021-06-29T18:34:44.000Z
|
venv/Lib/site-packages/falcon/cmd/print_routes.py
|
RafaelHMachado/Cioffis_Automation
|
07965ca71c3d4e78f5cee1fce4ba0bbfe2db9811
|
[
"MIT"
] | 1
|
2021-08-18T14:45:19.000Z
|
2021-08-18T14:45:19.000Z
|
#!/usr/bin/env python
# Copyright 2013 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script that prints out the routes of an API instance.
"""
from __future__ import print_function
from functools import partial
import inspect
import falcon
def print_routes(api, verbose=False): # pragma: no cover
"""
Initial call.
:param api: The falcon.API or callable that returns an instance to look at.
:type api: falcon.API or callable
:param verbose: If the output should be verbose.
:type verbose: bool
"""
traverse(api._router._roots, verbose=verbose)
def traverse(roots, parent='', verbose=False):
"""
Recursive call which also handles printing output.
    :param roots: The list of routing tree root nodes to traverse.
    :type roots: list
:param parent: The parent uri path to the current iteration.
:type parent: str
:param verbose: If the output should be verbose.
:type verbose: bool
"""
for root in roots:
if root.method_map:
print('->', parent + '/' + root.raw_segment)
if verbose:
for method, func in root.method_map.items():
if func.__name__ != 'method_not_allowed':
if isinstance(func, partial):
real_func = func.func
else:
real_func = func
try:
source_file = inspect.getsourcefile(real_func)
source_lines = inspect.getsourcelines(real_func)
source_info = '{}:{}'.format(source_file,
source_lines[1])
except TypeError:
# NOTE(vytas): If Falcon is cythonized, all default
# responders coming from cythonized modules will
# appear as built-in functions, and raise a
# TypeError when trying to locate the source file.
source_info = '[unknown file]'
print('-->' + method, source_info)
if root.children:
traverse(root.children, parent + '/' + root.raw_segment, verbose)
def main():
"""
Main entrypoint.
"""
import argparse
parser = argparse.ArgumentParser(
description='Example: print-api-routes myprogram:app')
parser.add_argument(
'-v', '--verbose', action='store_true',
help='Prints out information for each method.')
parser.add_argument(
'api_module',
help='The module and api to inspect. Example: myapp.somemodule:api',
)
args = parser.parse_args()
try:
module, instance = args.api_module.split(':', 1)
except ValueError:
parser.error(
'The api_module must include a colon between '
'the module and instance')
api = getattr(__import__(module, fromlist=[True]), instance)
if not isinstance(api, falcon.API):
if callable(api):
api = api()
if not isinstance(api, falcon.API):
parser.error(
'{0} did not return a falcon.API instance'.format(
args.api_module))
else:
parser.error(
'The instance must be of falcon.API or be '
'a callable without args that returns falcon.API')
print_routes(api, verbose=args.verbose)
if __name__ == '__main__':
main()
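
# Hedged usage note (hypothetical module path, not from this script): invoked
# from the command line with a "module:instance" spec, for example:
#
#     python print_routes.py myapp.somemodule:api --verbose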
| 34.621849
| 79
| 0.586893
|
d9e7654c2378d91c551b3f6a019e6906234fd664
| 6,779
|
py
|
Python
|
np_ml/decision_tree/decision_tree.py
|
wwwy-binary/NP_ML
|
a51b2f3cd753e4a8b5a67bec343c3e75b3fe52d8
|
[
"MIT"
] | 237
|
2018-03-17T08:50:18.000Z
|
2022-02-24T12:57:46.000Z
|
np_ml/decision_tree/decision_tree.py
|
pawopawo/NP_ML
|
a4cba12f191348526a6f6cc94df5084658fcfdea
|
[
"MIT"
] | 2
|
2019-01-28T03:30:31.000Z
|
2021-03-03T01:47:38.000Z
|
np_ml/decision_tree/decision_tree.py
|
pawopawo/NP_ML
|
a4cba12f191348526a6f6cc94df5084658fcfdea
|
[
"MIT"
] | 79
|
2018-03-21T12:22:09.000Z
|
2021-12-17T02:39:09.000Z
|
import numpy as np
def entropy(col):
_, cnts = np.unique(col, return_counts=True)
cnts = np.array(cnts)/len(col)
return -np.sum(cnts*np.log2(cnts))
# For ID3
def calcInforGain(col_x, col_y):
HD = entropy(col_y)
HDA = 0
unique, cnts = np.unique(col_x, return_counts=True)
cnts = np.array(cnts)/len(col_x)
cnts = dict(zip(unique, cnts))
for key, val in cnts.items():
HDA += val*entropy(col_y[col_x == key])
return HD - HDA, unique
# For C4.5
def calcInforGainRatio(col_x, col_y):
HD = entropy(col_y)
HDA = 0
unique, cnts = np.unique(col_x, return_counts=True)
cnts = np.array(cnts)/len(col_x)
cnts = dict(zip(unique, cnts))
for key, val in cnts.items():
HDA += val*entropy(col_y[col_x == key])
return (HD - HDA)/entropy(col_x), unique
# For CART
def Gini(col):
unique, cnts = np.unique(col, return_counts=True)
cnts = np.array(cnts)/len(col)
return 1 - np.sum(cnts ** 2)
def findMinGini(col_x, col_y):
unique, cnts = np.unique(col_x, return_counts=True)
cnts = dict(zip(unique, cnts))
min_gini = 1
min_key = None
for key, cnt in cnts.items():
gini = cnt/len(col_y)*Gini(col_y[col_x == key]) + (1-cnt/len(col_y))*Gini(col_y[col_x != key])
if gini < min_gini:
min_gini = gini
min_key = key
return min_gini, min_key
class Node:
def __init__(self, key, val, depth):
self.key = key
self.val = val
self.depth = depth
self.children = []
def __str__(self, indent=0):
ans = ""
if not self.children:
ans = str(self.key) + ": " + str(self.val) + ""
else:
ans += str(self.key) + ": " + str(self.val) + "("
for child in self.children:
ans += str(child) + ", "
ans += ")"
return ans
def addChild(self, key, val, depth=0):
self.children.append(Node(key, val, depth))
return self.children[-1]
class DecisionTree:
    def __init__(self, epsilon=0, max_depth=-1):  # here max_depth=-1 means no constraint
self.root = Node("root", 0, max_depth)
self.epsilon = epsilon
self.type = None
def fit(self, x, y, type="CART", detailed=False):
self.type = type
if type == "CART":
self.CARTgenerate(x, y, self.root, detailed)
else:
self.generate(x, y, self.root, type, detailed)
def generate(self, x, y, root, type, detailed):
# if empty
if x.size == 0:
return
        # if all remaining labels are the same kind
if np.all(y == True) or np.all(y == False):
root.addChild("leaf", y[0])
return
        # if all the features are the same, use the most common label
if np.all(x == x[0,:]) or root.depth == 0:
unique, cnts = np.unique(y, return_counts=True)
cnts = dict(zip(unique, cnts))
root.addChild("leaf", cnts[True] > cnts[False])
return
max_gain = 0
max_feature = -1
max_feature_vals = None
for i in range(x.shape[-1]):
if type=="ID3":
gain, feature_vals = calcInforGain(x[:, i], y)
elif type=="C4.5":
gain, feature_vals = calcInforGainRatio(x[:, i], y)
if gain > max_gain:
max_gain = gain
max_feature = i
max_feature_vals = feature_vals
if max_gain < self.epsilon:
return
else:
for val in max_feature_vals:
child = root.addChild(max_feature, val, root.depth-1)
self.generate(np.delete(x[x[:, max_feature]==val], max_feature, axis=-1), y[x[:, max_feature]==val], child, type, detailed)
def CARTgenerate(self, x, y, root, detailed, min_gini_old=1):
# if empty
if x.size == 0:
return
        # if all remaining labels are the same kind
if np.all(y == True) or np.all(y == False):
root.addChild("leaf", y[0])
return
        # if all the features are the same, use the most common label
if np.all(x == x[0,:]) or root.depth == 0:
unique, cnts = np.unique(y, return_counts=True)
cnts = dict(zip(unique, cnts))
root.addChild("leaf", cnts[True] > cnts[False])
return
min_gini = 1
min_feature = None
min_feature_val = None
for i in range(x.shape[-1]):
gini, feature_val = findMinGini(x[:, i], y)
if detailed:
print(gini, feature_val, i)
if gini < min_gini:
min_gini = gini
min_feature = i
min_feature_val = feature_val
        if abs(min_gini - min_gini_old) < 1e-6: # no feature reduces the Gini impurity further
unique, cnts = np.unique(y, return_counts=True)
cnts = dict(zip(unique, cnts))
root.addChild("leaf", cnts[True] > cnts[False])
return
child_true = root.addChild((min_feature, min_feature_val,), True, root.depth-1)
self.CARTgenerate(x[x[:, min_feature]==min_feature_val], y[x[:, min_feature]==min_feature_val], child_true, detailed, min_gini)
child_false = root.addChild((min_feature, min_feature_val,), False, root.depth-1)
self.CARTgenerate(x[x[:, min_feature]!=min_feature_val], y[x[:, min_feature]!=min_feature_val], child_false, detailed, min_gini)
# TODO: find nice regularization function
def pruning(self, root):
pass
def predict(self, x):
assert(len(self.root.children) > 0)
if len(x.shape) == 1:
tmp = self.root
if self.type == 'CART':
while len(tmp.children) > 1:
feature = tmp.children[0].key[0]
if x[feature] == tmp.children[0].key[1]:
tmp = tmp.children[0]
else:
tmp = tmp.children[1]
if len(tmp.children) == 1 and tmp.children[0].key == 'leaf':
return tmp.children[0].val
else:
while len(tmp.children) > 1:
feature = tmp.children[0].key
if x[feature] == tmp.children[0].val:
tmp = tmp.children[0]
else:
tmp = tmp.children[1]
if len(tmp.children) == 1 and tmp.children[0].key == 'leaf':
return tmp.children[0].val
else:
assert(len(x.shape) == 2)
ans = []
for test in x:
ans.append(self.predict(test))
return ans
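
if __name__ == "__main__":
    # Hedged demo (toy data, not from the original repository): the label just
    # copies feature 0, so a single CART split on that feature is enough.
    x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([False, False, True, True])
    tree = DecisionTree()
    tree.fit(x, y, type="CART")
    print(tree.root)
    print(tree.predict(np.array([1, 0])))  # -> True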
| 36.446237
| 139
| 0.526036
|
3ea878e62e085722c30ee317f265a685067baf7d
| 1,650
|
py
|
Python
|
pkg/codegen/internal/test/testdata/simple-resource-schema/python/pulumi_example/arg_function.py
|
followben/pulumi
|
db6c4b88cbee47332bc305f594b1c15eb6803bd7
|
[
"Apache-2.0"
] | null | null | null |
pkg/codegen/internal/test/testdata/simple-resource-schema/python/pulumi_example/arg_function.py
|
followben/pulumi
|
db6c4b88cbee47332bc305f594b1c15eb6803bd7
|
[
"Apache-2.0"
] | null | null | null |
pkg/codegen/internal/test/testdata/simple-resource-schema/python/pulumi_example/arg_function.py
|
followben/pulumi
|
db6c4b88cbee47332bc305f594b1c15eb6803bd7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities, _tables
from .resource import Resource
__all__ = [
'ArgFunctionResult',
'AwaitableArgFunctionResult',
'arg_function',
]
@pulumi.output_type
class ArgFunctionResult:
def __init__(__self__, result=None):
if result and not isinstance(result, Resource):
raise TypeError("Expected argument 'result' to be a Resource")
pulumi.set(__self__, "result", result)
@property
@pulumi.getter
def result(self) -> Optional['Resource']:
return pulumi.get(self, "result")
class AwaitableArgFunctionResult(ArgFunctionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ArgFunctionResult(
result=self.result)
def arg_function(arg1: Optional['Resource'] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableArgFunctionResult:
"""
Use this data source to access information about an existing resource.
"""
__args__ = dict()
__args__['arg1'] = arg1
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('example::argFunction', __args__, opts=opts, typ=ArgFunctionResult).value
return AwaitableArgFunctionResult(
result=__ret__.result)
| 30
| 109
| 0.684242
|
806bf3f8e5b1281568b02bc1d0510ff34daba816
| 1,161
|
py
|
Python
|
implementation/scripts/base/visualize_mesh.py
|
saurabbhsp/mesh-3d-reconstruction
|
c52312bce7e3121643189f6b67192ffe28b08565
|
[
"Apache-2.0"
] | null | null | null |
implementation/scripts/base/visualize_mesh.py
|
saurabbhsp/mesh-3d-reconstruction
|
c52312bce7e3121643189f6b67192ffe28b08565
|
[
"Apache-2.0"
] | null | null | null |
implementation/scripts/base/visualize_mesh.py
|
saurabbhsp/mesh-3d-reconstruction
|
c52312bce7e3121643189f6b67192ffe28b08565
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import argparse
import configparser
from model.templateFFD.templateFFDBuilder import TemplateFFDBuilder
from configReader import config_reader as configReader
"""
This script visualizes the mesh predictions of a trained FFD model. A single
argument specifying the path to the configuration file is required.
"""
parser = argparse.ArgumentParser(description="Train FFD model")
parser.add_argument('configFile', help='Config file path')
args = parser.parse_args()
tf.logging.set_verbosity(tf.logging.INFO)
config = configparser.ConfigParser()
config.optionxform = str
config.read(args.configFile)
"""Read config"""
category = configReader.get_category(config)
path_dictionary = configReader.get_path_list(config)
model_params = configReader.get_model_params(config)
split_config = configReader.get_split_params(config)
id = configReader.get_model_id(config)
max_steps = configReader.get_max_steps(config)
train_config = configReader.get_train_config(config)
model = TemplateFFDBuilder(id, model_params,
path_dictionary, split_config, train_config)
model.visualize_predictions(tf.estimator.ModeKeys.PREDICT)
| 31.378378
| 73
| 0.810508
|
f61993ab283a16b72f5ae643c4a89753a918796c
| 5,350
|
py
|
Python
|
encoding/models/cifarresnet.py
|
pansiyuan123/PyTorch-Encoding
|
7463e5938fb824b8eddbffdf53f55308464252d3
|
[
"MIT"
] | 4
|
2020-03-26T11:05:08.000Z
|
2020-12-22T08:37:20.000Z
|
encoding/models/cifarresnet.py
|
pansiyuan123/PyTorch-Encoding
|
7463e5938fb824b8eddbffdf53f55308464252d3
|
[
"MIT"
] | null | null | null |
encoding/models/cifarresnet.py
|
pansiyuan123/PyTorch-Encoding
|
7463e5938fb824b8eddbffdf53f55308464252d3
|
[
"MIT"
] | 3
|
2020-03-26T11:05:09.000Z
|
2022-01-28T11:29:00.000Z
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import torch
import torch.nn as nn
from torch.autograd import Variable
from ..nn import View
__all__ = ['cifar_resnet20']
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class Basicblock(nn.Module):
""" Pre-activation residual block
Identity Mapping in Deep Residual Networks
ref https://arxiv.org/abs/1603.05027
"""
expansion = 1
def __init__(self, inplanes, planes, stride=1, norm_layer=nn.BatchNorm2d):
super(Basicblock, self).__init__()
if inplanes != planes or stride !=1 :
self.downsample = True
self.residual_layer = nn.Conv2d(inplanes, planes,
kernel_size=1, stride=stride)
else:
self.downsample = False
conv_block=[]
conv_block+=[norm_layer(inplanes),
nn.ReLU(inplace=True),
conv3x3(inplanes, planes,stride=stride),
norm_layer(planes),
nn.ReLU(inplace=True),
conv3x3(planes, planes)]
self.conv_block = nn.Sequential(*conv_block)
def forward(self, input):
if self.downsample:
residual = self.residual_layer(input)
else:
residual = input
return residual + self.conv_block(input)
class Bottleneck(nn.Module):
""" Pre-activation residual block
Identity Mapping in Deep Residual Networks
ref https://arxiv.org/abs/1603.05027
"""
expansion = 4
def __init__(self, inplanes, planes, stride=1, norm_layer=nn.BatchNorm2d):
super(Bottleneck, self).__init__()
if inplanes != planes*self.expansion or stride !=1 :
self.downsample = True
self.residual_layer = nn.Conv2d(inplanes,
planes * self.expansion, kernel_size=1, stride=stride)
else:
self.downsample = False
conv_block = []
conv_block += [norm_layer(inplanes),
nn.ReLU(inplace=True),
nn.Conv2d(inplanes, planes, kernel_size=1,
stride=1, bias=False)]
conv_block += [norm_layer(planes),
nn.ReLU(inplace=True),
nn.Conv2d(planes, planes, kernel_size=3,
stride=stride, padding=1, bias=False)]
conv_block += [norm_layer(planes),
nn.ReLU(inplace=True),
nn.Conv2d(planes, planes * self.expansion,
kernel_size=1, stride=1, bias=False)]
self.conv_block = nn.Sequential(*conv_block)
def forward(self, x):
if self.downsample:
residual = self.residual_layer(x)
else:
residual = x
return residual + self.conv_block(x)
class CIFAR_ResNet(nn.Module):
def __init__(self, block=Basicblock, num_blocks=[2,2,2], width_factor = 1,
num_classes=10, norm_layer=torch.nn.BatchNorm2d):
super(CIFAR_ResNet, self).__init__()
self.expansion = block.expansion
self.inplanes = int(width_factor * 16)
strides = [1, 2, 2]
model = []
# Conv_1
model += [nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1),
norm_layer(self.inplanes),
nn.ReLU(inplace=True)]
# Residual units
model += [self._residual_unit(block, self.inplanes, num_blocks[0],
strides[0], norm_layer=norm_layer)]
for i in range(2):
model += [self._residual_unit(
block, int(2*self.inplanes/self.expansion),
num_blocks[i+1], strides[i+1], norm_layer=norm_layer)]
# Last conv layer
model += [norm_layer(self.inplanes),
nn.ReLU(inplace=True),
nn.AvgPool2d(8),
View(-1, self.inplanes),
nn.Linear(self.inplanes, num_classes)]
self.model = nn.Sequential(*model)
def _residual_unit(self, block, planes, n_blocks, stride, norm_layer):
strides = [stride] + [1]*(n_blocks-1)
layers = []
for i in range(n_blocks):
layers += [block(self.inplanes, planes, strides[i], norm_layer=norm_layer)]
self.inplanes = self.expansion*planes
return nn.Sequential(*layers)
def forward(self, input):
return self.model(input)
def cifar_resnet20(pretrained=False, root='~/.encoding/models', **kwargs):
"""Constructs a CIFAR ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = CIFAR_ResNet(Bottleneck, [3, 3, 3], **kwargs)
if pretrained:
model.load_state_dict(torch.load(
get_model_file('cifar_resnet20', root=root)), strict=False)
return model
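# Minimal usage sketch: build the network and push a dummy CIFAR-sized batch
# through it (10 classes by default).
if __name__ == "__main__":
    net = cifar_resnet20(pretrained=False)
    logits = net(torch.randn(2, 3, 32, 32))
    print(logits.shape)  # expected: torch.Size([2, 10])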
| 38.214286
| 96
| 0.566542
|
5371e7d3e16dac4b4cfcc48be9a352d77db96ce4
| 244
|
py
|
Python
|
c3po.py
|
PtspluS/Isensei
|
b6a8a02461868a65d9be43a3c44a2df7f63a3c26
|
[
"MIT"
] | null | null | null |
c3po.py
|
PtspluS/Isensei
|
b6a8a02461868a65d9be43a3c44a2df7f63a3c26
|
[
"MIT"
] | null | null | null |
c3po.py
|
PtspluS/Isensei
|
b6a8a02461868a65d9be43a3c44a2df7f63a3c26
|
[
"MIT"
] | null | null | null |
from random import *
# Init your variables here
# Put your bot name here
name = "C3PO"
# C3PO strategy: return a random available cell
def play(board, available_cells, player):
return available_cells[randint(0,len(available_cells)-1)]
| 22.181818
| 61
| 0.745902
|
f0928fd400bc64e9e1405215b11e4a9d6a033b69
| 4,767
|
py
|
Python
|
boa3_test/tests/compiler_tests/test_none.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/tests/compiler_tests/test_none.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/tests/compiler_tests/test_none.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
from boa3.boa3 import Boa3
from boa3.exception.CompilerError import MismatchedTypes
from boa3.neo.vm.opcode.Opcode import Opcode
from boa3_test.tests.boa_test import BoaTest
from boa3_test.tests.test_classes.testengine import TestEngine
class TestNone(BoaTest):
default_folder: str = 'test_sc/none_test'
def test_variable_none(self):
path = self.get_contract_path('VariableNone.py')
expected_output = (
Opcode.INITSLOT # function signature
+ b'\x01'
+ b'\x00'
+ Opcode.PUSHNULL
+ Opcode.STLOC0
+ Opcode.RET # return
)
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
def test_none_tuple(self):
path = self.get_contract_path('NoneTuple.py')
expected_output = (
Opcode.INITSLOT # function signature
+ b'\x01'
+ b'\x00'
+ Opcode.PUSHNULL # a = (None, None, None)
+ Opcode.PUSHNULL
+ Opcode.PUSHNULL
+ Opcode.PUSH3
+ Opcode.PACK
+ Opcode.STLOC0
+ Opcode.RET # return
)
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
def test_none_identity(self):
expected_output = (
Opcode.INITSLOT # function signature
+ b'\x00'
+ b'\x01'
+ Opcode.LDARG0
+ Opcode.ISNULL
+ Opcode.RET # return
)
path = self.get_contract_path('NoneIdentity.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main', None)
self.assertEqual(True, result)
result = self.run_smart_contract(engine, path, 'Main', 5)
self.assertEqual(False, result)
result = self.run_smart_contract(engine, path, 'Main', '5')
self.assertEqual(False, result)
def test_none_not_identity(self):
expected_output = (
Opcode.INITSLOT # function signature
+ b'\x00'
+ b'\x01'
+ Opcode.LDARG0
+ Opcode.ISNULL
+ Opcode.NOT
+ Opcode.RET # return
)
path = self.get_contract_path('NoneNotIdentity.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main', None)
self.assertEqual(False, result)
result = self.run_smart_contract(engine, path, 'Main', 5)
self.assertEqual(True, result)
result = self.run_smart_contract(engine, path, 'Main', '5')
self.assertEqual(True, result)
def test_none_equality(self):
path = self.get_contract_path('NoneEquality.py')
self.assertCompilerLogs(MismatchedTypes, path)
def test_mismatched_type_int_operation(self):
path = self.get_contract_path('MismatchedTypesInOperation.py')
self.assertCompilerLogs(MismatchedTypes, path)
def test_reassign_variable_with_none(self):
expected_output = (
Opcode.INITSLOT # function signature
+ b'\x02'
+ b'\x00'
+ Opcode.PUSH2 # a = 2
+ Opcode.STLOC0
+ Opcode.PUSH4 # b = a * 2
+ Opcode.STLOC1
+ Opcode.PUSHNULL # a = None
+ Opcode.STLOC0
+ Opcode.RET # return
)
path = self.get_contract_path('ReassignVariableWithNone.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main')
self.assertIsVoid(result)
def test_reassign_variable_after_none(self):
expected_output = (
Opcode.INITSLOT # function signature
+ b'\x02'
+ b'\x00'
+ Opcode.PUSHNULL # a = None
+ Opcode.STLOC0
+ Opcode.PUSH2 # a = 2
+ Opcode.STLOC0
+ Opcode.PUSH4 # b = a * 2
+ Opcode.STLOC1
+ Opcode.RET # return
)
path = self.get_contract_path('ReassignVariableAfterNone.py')
output = Boa3.compile(path)
self.assertEqual(expected_output, output)
engine = TestEngine()
result = self.run_smart_contract(engine, path, 'Main')
self.assertIsVoid(result)
def test_boa2_none_test(self):
path = self.get_contract_path('NoneBoa2Test.py')
self.assertCompilerLogs(MismatchedTypes, path)
| 33.335664
| 70
| 0.575414
|
672f1a2113d145c08a444911b9899567d3529e25
| 158
|
py
|
Python
|
src/bd103/__init__.py
|
BD103/Package
|
c1f62b156713c68ac85362f0eae1560fbcca94f0
|
[
"Apache-2.0"
] | 2
|
2020-12-09T00:00:16.000Z
|
2021-04-21T00:12:24.000Z
|
src/bd103/__init__.py
|
BD103/BD103-Python
|
dea7c71d72eac16bcc5b98e8e5c0d5bcfb5da82f
|
[
"Apache-2.0"
] | 2
|
2021-12-14T22:08:10.000Z
|
2021-12-18T12:23:47.000Z
|
src/bd103/__init__.py
|
BD103/BD103-Python
|
dea7c71d72eac16bcc5b98e8e5c0d5bcfb5da82f
|
[
"Apache-2.0"
] | null | null | null |
"""Collection a various utilities and modules that do random stuff.
This is an all-in-one package that contains assorted code that does assorted things.
"""
| 31.6
| 84
| 0.778481
|
7e4f13a20817767c2c3467a493edbce9002e2b35
| 3,481
|
py
|
Python
|
lib/surface/pubsub/topics/list_subscriptions.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/pubsub/topics/list_subscriptions.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/pubsub/topics/list_subscriptions.py
|
bopopescu/SDK
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
[
"Apache-2.0"
] | 2
|
2020-11-04T03:08:21.000Z
|
2020-11-05T08:14:41.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Pub/Sub topics list_subscriptions command."""
from googlecloudsdk.api_lib.pubsub import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core.resource import resource_printer_base
from googlecloudsdk.core.resource import resource_projector
class ListSubscriptions(base.ListCommand):
"""Lists Cloud Pub/Sub subscriptions from a given topic.
Lists all of the Cloud Pub/Sub subscriptions attached to the given topic and
that match the given filter.
"""
detailed_help = {
'EXAMPLES': """\
To filter results by subscription name
(ie. only show subscription 'mysubs'), run:
$ {command} --topic mytopic --filter=subscriptionId:mysubs
To combine multiple filters (with AND or OR), run:
$ {command} --topic mytopic --filter="subscriptionId:mysubs1 AND subscriptionId:mysubs2"
To filter subscriptions that match an expression:
$ {command} --topic mytopic --filter="subscriptionId:subs_*"
""",
}
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'topic',
help=('The name of the topic to list subscriptions for.'))
@util.MapHttpError
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Yields:
Subscriptions paths that match the regular expression in args.name_filter.
"""
msgs = self.context['pubsub_msgs']
pubsub = self.context['pubsub']
page_size = None
page_token = None
if args.page_size:
page_size = min(args.page_size, util.MAX_LIST_RESULTS)
if not args.filter and args.limit:
page_size = min(args.limit, page_size or util.MAX_LIST_RESULTS)
while True:
list_subscriptions_req = (
msgs.PubsubProjectsTopicsSubscriptionsListRequest(
topic=util.TopicFormat(args.topic),
pageSize=page_size,
pageToken=page_token))
list_subscriptions_result = pubsub.projects_topics_subscriptions.List(
list_subscriptions_req)
for subscription in list_subscriptions_result.subscriptions:
yield TopicSubscriptionDict(subscription)
page_token = list_subscriptions_result.nextPageToken
if not page_token:
break
yield resource_printer_base.PageMarker()
def TopicSubscriptionDict(topic_subscription):
"""Returns a topic_subscription dict with additional fields."""
result = resource_projector.MakeSerializable(
{'subscription': topic_subscription})
subscription_info = util.SubscriptionIdentifier(topic_subscription)
result['projectId'] = subscription_info.project.project_name
result['subscriptionId'] = subscription_info.resource_name
return result
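# Sketch of the shape returned by TopicSubscriptionDict (field values are
# placeholders):
#   {'subscription': <raw subscription resource>,
#    'projectId': 'my-project',
#    'subscriptionId': 'mysubs'}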
| 33.152381
| 100
| 0.717897
|
79893abeef2f88502b81f8bb05397b97cfe6101e
| 7,959
|
py
|
Python
|
QQAlbumDownloader/util/SlideVerfication.py
|
graysonwp/QQAlbumDownloader
|
29e8b71c5b46a8084b80e81179454e92b9058c1a
|
[
"MIT"
] | 5
|
2019-09-26T04:11:45.000Z
|
2021-08-17T12:05:48.000Z
|
QQAlbumDownloader/util/SlideVerfication.py
|
graysonwp/QQAlbumDownloader
|
29e8b71c5b46a8084b80e81179454e92b9058c1a
|
[
"MIT"
] | 20
|
2021-09-04T10:40:51.000Z
|
2022-03-12T00:52:48.000Z
|
QQAlbumDownloader/util/SlideVerfication.py
|
wpwbb510582246/QQAlbumDownloader
|
29e8b71c5b46a8084b80e81179454e92b9058c1a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : Grayson
# @Time : 2020-12-18 21:34
# @Email : weipengweibeibei@163.com
# @Description :
"""
This module handles slide (drag) CAPTCHA verification.
"""
from selenium.webdriver import ActionChains
import random, time, os
import cv2
from PIL import Image as Im
import numpy as np
import requests
class SlideVerificationCode():
"""滑动验证码破解"""
def __init__(self, slider_ele=None, background_ele=None, count=1, save_image=False):
"""
        :param count: number of verification retries
        :param save_image: whether to keep the images produced during verification; not kept by default
"""
self.count = count
self.save_image = save_image
self.slider_ele = slider_ele
self.background_ele = background_ele
def slide_verification(self, driver, slide_element, distance):
"""
        :param driver: webdriver instance
        :type driver: webdriver.Chrome
        :param slide_element: the slider element to drag
        :type slide_element: WebElement
        :param distance: distance to slide, in pixels
        :type distance: int
:return:
"""
        # record the page URL before sliding
start_url = driver.current_url
print("需要滑动的距离为:", distance)
# 根据滑动距离生成滑动轨迹
locus = self.get_slide_locus(distance)
print("生成的滑动轨迹为:{},轨迹的距离之和为{}".format(locus, distance))
# 按下鼠标左键
ActionChains(driver).click_and_hold(slide_element).perform()
time.sleep(0.5)
        # walk the trajectory to perform the slide
for loc in locus:
time.sleep(0.01)
ActionChains(driver).move_by_offset(loc, random.randint(-5, 5)).perform()
ActionChains(driver).context_click(slide_element)
        # release the mouse button
ActionChains(driver).release(on_element=slide_element).perform()
        # check whether verification passed; if not, slide again
        time.sleep(2)
        # fetch the URL again after sliding
        end_url = driver.current_url
        # if the slide failed, retry up to `count` times
        if start_url == end_url and self.count > 0:
            print("Verification attempt {} failed, retrying".format(6 - self.count))
self.count -= 1
self.slide_verification(driver, slide_element, distance)
def onload_save_img(self, url, filename="image.png"):
"""
        Download an image and save it to disk
        :param url: image URL
        :param filename: file name to save the image as
:return:
"""
try:
response = requests.get(url=url)
except(requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError)as e:
print("图片下载失败")
raise e
else:
with open(filename, "wb") as f:
f.write(response.content)
def get_element_slide_distance(self, slider_ele, background_ele, correct=0):
"""
        Compute the slide distance from the slider and background elements.
        This method only handles the case where both the slider and the background
        are single complete images; it does not work for backgrounds stitched together
        from several small tiles (a dedicated method for that case may follow later).
        :param slider_ele: element holding the slider image
        :type slider_ele: WebElement
        :param background_ele: element holding the background image
        :type background_ele: WebElement
        :param correct: correction offset for the gap screenshot, default 0; only used when debugging the screenshot
        :type correct: int
        :return: X coordinate of the gap in the background image (left edge of the gap)
"""
        # fetch the CAPTCHA image URLs
        slider_url = slider_ele.get_attribute("src")
        background_url = background_ele.get_attribute("src")
        # download the background and slider images
slider = "slider.jpg"
background = "background.jpg"
self.onload_save_img(slider_url, slider)
self.onload_save_img(background_url, background)
        # read the images in grayscale as numpy arrays
        slider_pic = cv2.imread(slider, 0)
        background_pic = cv2.imread(background, 0)
        # shape of the slider array --> width and height of the gap image
        width, height = slider_pic.shape[::-1]
        # save the processed images under new names
slider01 = "slider01.jpg"
background_01 = "background01.jpg"
cv2.imwrite(background_01, background_pic)
cv2.imwrite(slider01, slider_pic)
        # re-read the saved slider image
        slider_pic = cv2.imread(slider01)
        # convert the colour space
        slider_pic = cv2.cvtColor(slider_pic, cv2.COLOR_BGR2GRAY)
        # take the absolute colour difference
        slider_pic = abs(255 - slider_pic)
        # save the image
        cv2.imwrite(slider01, slider_pic)
        # read the slider image
        slider_pic = cv2.imread(slider01)
        # read the background image
        background_pic = cv2.imread(background_01)
        # match the slider template against the background
        result = cv2.matchTemplate(slider_pic, background_pic, cv2.TM_CCOEFF_NORMED)
        # locate the gap position via array operations
        top, left = np.unravel_index(result.argmax(), result.shape)
        # gap coordinates inside the background image
        print("Current gap position:", (left, top, left + width, top + height))
        # decide whether to keep the screenshots produced during recognition
if self.save_image:
            # save a screenshot of the gap
            # apply the coordinate correction
            loc = (left + correct, top + correct, left + width - correct, top + height - correct)
            self.image_crop(background, loc)
        else:
            # remove the temporary files created during recognition
            os.remove(slider01)
            os.remove(background_01)
            os.remove(slider)
            os.remove(background)
        # return the distance the slider has to move
return left
def get_image_slide_dictance(self, slider_image, background_image, correct=0):
"""
        Compute the slide distance from the slider and background image files.
        This method only handles the case where both the slider and the background
        are single complete images; it does not work for backgrounds stitched together
        from several small tiles (a dedicated method for that case may follow later).
        :param slider_image: path of the slider image
        :type slider_image: str
        :param background_image: path of the background image
        :type background_image: str
        :param correct: correction offset for the gap screenshot, default 0; only used when debugging the screenshot
        :type correct: int
        :return: X coordinate of the gap in the background image (left edge of the gap)
"""
        # read the images in grayscale as numpy arrays
        slider_pic = cv2.imread(slider_image, 0)
        background_pic = cv2.imread(background_image, 0)
        # shape of the slider array --> width and height of the gap image
        width, height = slider_pic.shape[::-1]
        # save the processed images under new names
slider01 = "slider01.jpg"
background_01 = "background01.jpg"
cv2.imwrite(background_01, background_pic)
cv2.imwrite(slider01, slider_pic)
        # re-read the saved slider image
        slider_pic = cv2.imread(slider01)
        # convert the colour space
        slider_pic = cv2.cvtColor(slider_pic, cv2.COLOR_BGR2GRAY)
        # take the absolute colour difference
        slider_pic = abs(255 - slider_pic)
        # save the image
        cv2.imwrite(slider01, slider_pic)
        # read the slider image
        slider_pic = cv2.imread(slider01)
        # read the background image
        background_pic = cv2.imread(background_01)
        # match the slider template against the background
        result = cv2.matchTemplate(slider_pic, background_pic, cv2.TM_CCOEFF_NORMED)
        # locate the gap position
        top, left = np.unravel_index(result.argmax(), result.shape)
        # gap coordinates inside the background image
        print("Current gap position:", (left, top, left + width, top + height))
        # decide whether to keep the screenshots produced during recognition
if self.save_image:
            # save a screenshot of the gap
            # apply the coordinate correction
            loc = (left + correct, top + correct, left + width - correct, top + height - correct)
            self.image_crop(background_image, loc)
        else:
            # remove the temporary files created during recognition
            os.remove(slider01)
            os.remove(background_01)
        # return the distance the slider has to move
return left
@classmethod
def get_slide_locus(self, distance):
"""
        Build a slide trajectory for the given distance: slow at the start,
        fast in the middle, slow at the end.
        :param distance: distance to move
        :type distance: int
        :return: slide trajectory
        :rtype: list
"""
remaining_dist = distance
locus = []
while remaining_dist > 0:
ratio = remaining_dist / distance
if ratio < 0.2:
                # move slowly at the start
                span = random.randint(2, 8)
            elif ratio > 0.8:
                # move slowly near the end
                span = random.randint(5, 8)
            else:
                # move fast in the middle
span = random.randint(10, 16)
locus.append(span)
remaining_dist -= span
return locus
def image_crop(self, image, location, new_name="new_image.png"):
"""
        Crop the given region out of an image
        :param image: path of the image to crop
        :param location: coordinates of the region to crop: (left, top, right, bottom)
        :type location: tuple
:return:
"""
        # open the image
        image = Im.open(image)
        # crop the image
        imagecrop = image.crop(location)
        # save the crop
imagecrop.save(new_name)
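# Minimal usage sketch (the image paths are placeholders for locally saved
# CAPTCHA screenshots, and the Selenium call needs a live driver and slider
# element):
#
#   svc = SlideVerificationCode(count=3, save_image=False)
#   gap_offset = svc.get_image_slide_dictance("slider.jpg", "background.jpg")
#   svc.slide_verification(driver, slider_handle, gap_offset)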
| 32.353659
| 97
| 0.590903
|
b18c2f63bb3f3a4e0c6fd5b848afb087dccba289
| 2,472
|
py
|
Python
|
deepxml/models/mlp.py
|
svanschalkwyk/deepxml
|
17a357904d100ed14bb70392b20fc8809e9ea2c7
|
[
"MIT"
] | 41
|
2021-03-11T22:15:53.000Z
|
2022-03-29T00:53:09.000Z
|
deepxml/models/mlp.py
|
svanschalkwyk/deepxml
|
17a357904d100ed14bb70392b20fc8809e9ea2c7
|
[
"MIT"
] | 9
|
2021-06-16T02:05:56.000Z
|
2022-01-18T08:41:25.000Z
|
deepxml/models/mlp.py
|
svanschalkwyk/deepxml
|
17a357904d100ed14bb70392b20fc8809e9ea2c7
|
[
"MIT"
] | 9
|
2021-06-11T09:34:21.000Z
|
2022-01-23T01:50:44.000Z
|
import torch
import torch.nn as nn
__author__ = 'KD'
class MLP(nn.Module):
"""
    A multi-layer perceptron with a configurable non-linearity
* no non-linearity after last layer
* support for 2D or 3D inputs
Parameters:
-----------
input_size: int
input size of embeddings
hidden_size: int or list of ints or str (comma separated)
e.g., 512: a single hidden layer with 512 neurons
"512": a single hidden layer with 512 neurons
"512,300": 512 -> nnl -> 300
[512, 300]: 512 -> nnl -> 300
dimensionality of layers in MLP
nnl: str, optional, default='relu'
which non-linearity to use
device: str, default="cuda:0"
keep on this device
"""
def __init__(self, input_size, hidden_size, nnl='relu', device="cuda:0"):
super(MLP, self).__init__()
hidden_size = self.parse_hidden_size(hidden_size)
        assert len(hidden_size) >= 1, "Should contain at least 1 hidden layer"
hidden_size = [input_size] + hidden_size
self.device = torch.device(device)
layers = []
for i, (i_s, o_s) in enumerate(zip(hidden_size[:-1], hidden_size[1:])):
layers.append(nn.Linear(i_s, o_s, bias=True))
if i < len(hidden_size) - 2:
layers.append(self._get_nnl(nnl))
self.transform = torch.nn.Sequential(*layers)
def parse_hidden_size(self, hidden_size):
if isinstance(hidden_size, int):
return [hidden_size]
elif isinstance(hidden_size, str):
_hidden_size = []
for item in hidden_size.split(","):
_hidden_size.append(int(item))
return _hidden_size
elif isinstance(hidden_size, list):
return hidden_size
else:
raise NotImplementedError("hidden_size must be a int, str or list")
def _get_nnl(self, nnl):
if nnl == 'sigmoid':
return torch.nn.Sigmoid()
elif nnl == 'relu':
return torch.nn.ReLU()
elif nnl == 'gelu':
return torch.nn.GELU()
elif nnl == 'tanh':
return torch.nn.Tanh()
else:
raise NotImplementedError(f"{nnl} not implemented!")
def forward(self, x):
return self.transform(x)
def to(self):
"""Transfer to device
"""
super().to(self.device)
@property
def sparse(self):
return False
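# Minimal usage sketch: a 768 -> 512 -> 300 MLP applied to a small batch of
# embeddings on CPU (the sizes are illustrative only).
if __name__ == "__main__":
    mlp = MLP(input_size=768, hidden_size="512,300", nnl="relu", device="cpu")
    out = mlp(torch.randn(4, 768))
    print(out.shape)  # expected: torch.Size([4, 300])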
| 31.692308
| 79
| 0.580502
|
c396a25090c8a3301520eb55cef07244ae76fa41
| 688
|
py
|
Python
|
cwitune/cwitune/main.py
|
trujunzhang/djzhang-targets
|
c2e327acde9d51f0455e7243f17d93d74b579501
|
[
"MIT"
] | 2
|
2018-12-03T16:30:55.000Z
|
2019-04-03T13:29:20.000Z
|
cwitune/cwitune/main.py
|
trujunzhang/djzhang-targets
|
c2e327acde9d51f0455e7243f17d93d74b579501
|
[
"MIT"
] | null | null | null |
cwitune/cwitune/main.py
|
trujunzhang/djzhang-targets
|
c2e327acde9d51f0455e7243f17d93d74b579501
|
[
"MIT"
] | 1
|
2019-04-03T13:29:25.000Z
|
2019-04-03T13:29:25.000Z
|
from scrapy import cmdline
import os
class Crawler:
## get input ##
filename = "results.json"
def prepare(self):
## delete only if file exists ##
if os.path.exists(self.filename):
os.remove(self.filename)
else:
print("Sorry, I can not remove %s file." % self.filename)
def execute(self, module):
command = ("scrapy crawl %s" % module)
cmdline.execute(command.split())
def main():
utils = Crawler()
utils.prepare()
# utils.execute("itune")
utils.execute("itune_debug")
# utils.execute("itune_browser")
# utils.execute("itune_browser_debug")
if __name__ == '__main__':
main()
| 22.933333
| 69
| 0.604651
|
3a45febe4fb25c3c14bc1d3a37e960cb3abc1217
| 4,531
|
py
|
Python
|
ocu/cache.py
|
albertoleal/open-conference-url
|
b38e8cc8015913eb8562f9ab8970ac3cfd066a43
|
[
"MIT"
] | null | null | null |
ocu/cache.py
|
albertoleal/open-conference-url
|
b38e8cc8015913eb8562f9ab8970ac3cfd066a43
|
[
"MIT"
] | null | null | null |
ocu/cache.py
|
albertoleal/open-conference-url
|
b38e8cc8015913eb8562f9ab8970ac3cfd066a43
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
import os.path
import re
import subprocess
from datetime import datetime
from ocu.prefs import prefs
# Manages storage and retrieval of cached data for this workflow (e.g. calendar
# events, date/time of last run, etc.)
class Cache(object):
# The unique bundle ID of the workflow
workflow_bundle_id = 'com.calebevans.openconferenceurl'
# The directory for (volatile) Alfred workflow cache entries
cache_dir = os.path.expanduser(os.path.join(
'~', 'Library', 'Caches', 'com.runningwithcrayons.Alfred',
'Workflow Data', workflow_bundle_id))
# The file path to the cache store
cache_path = os.path.join(cache_dir, 'event-cache.json')
# The directory containing the workflow's source files
code_dir = os.path.dirname(os.path.realpath(__file__))
def __init__(self):
self.create_cache_dir()
self.map = {}
try:
return self.read()
except (IOError, RuntimeError):
self.refresh(force=True)
return self.read()
# Return the current date as a string (for comparison against the date the
# cache was last refreshed)
def get_current_date(self):
return datetime.now().strftime(prefs.date_format)
# Read the cache JSON into memory
def read(self):
with open(self.cache_path, 'r') as cache_file:
# Make all JSON keys accessible as instance attributes
self.map.update(json.load(cache_file))
# Invalidate the cache at the start of every day
if self.get('last_refresh_date') != self.get_current_date():
# Raising a RuntimeError will run the exception-handling code in
# the constructor
raise RuntimeError('Last refresh date is too old')
# Create the cache directory if it doesn't already exist
def create_cache_dir(self):
try:
os.makedirs(self.cache_dir)
except OSError:
pass
# Retrieve the given key from the cache
def get(self, key):
return self.map.get(key)
# Return True if the given key exists in the map; return False otherwise
def has(self, key):
return key in self.map
# Update the cache with the given key/value pairs
def update(self, attrs):
self.map.update(attrs)
with open(self.cache_path, 'w') as cache_file:
json.dump(self.map, cache_file,
indent=2, separators=(',', ': '))
# Refresh latest calendar event data
def refresh(self, force=False):
event_blobs = re.split(r'(?:^|\n)• ', subprocess.check_output([
'/opt/homebrew/bin/icalBuddy',
# Override the default date/time formats
'--dateFormat',
prefs.date_format,
'--noRelativeDates',
'--timeFormat',
prefs.time_format,
# remove parenthetical calendar names from event titles
'--noCalendarNames',
# Only include the following fields and enforce their order
'--includeEventProps',
','.join(prefs.event_props),
'--propertyOrder',
','.join(prefs.event_props),
'eventsToday+{}'.format(prefs.offset_from_today)
]).decode('utf-8'))
# The first element will always be an empty string, because the bullet
# point we are splitting on is not a delimiter
event_blobs.pop(0)
# Detect when cache data has been updated, or if it has remained the
# same (the event blobs are the only data worth checking)
if force or self.get('event_blobs') != event_blobs:
self.update({
# Cache event blob data for next execution of workflow
'event_blobs': event_blobs,
# Cache the current date so we know when to invalidate the
# cache
'last_refresh_date': self.get_current_date()
})
has_cache_updated = True
else:
has_cache_updated = False
return has_cache_updated
# Queue a refresh of the cache (this will cause Alfred to refresh the
# workflow cache in the background without blocking the execution of this
# script)
def queue_refresh(self):
subprocess.Popen([
'/usr/bin/osascript',
os.path.join(self.code_dir, 'queue_cache_refresh.applescript')
])
cache = Cache()
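# Minimal usage sketch (requires the icalBuddy binary at the path referenced
# above):
#
#   cache.refresh(force=True)
#   print(cache.get("last_refresh_date"))
#   print(len(cache.get("event_blobs") or []))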
| 35.124031
| 79
| 0.625028
|
b719f8ebdc3daa1201d97e728fbd0749494ba94e
| 21
|
py
|
Python
|
TEST3D/GUI/0010300_page_active/cleanup.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 31
|
2015-04-01T15:59:36.000Z
|
2022-03-18T20:21:47.000Z
|
TEST3D/GUI/0010300_page_active/cleanup.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 3
|
2015-02-06T19:30:24.000Z
|
2017-05-25T14:14:31.000Z
|
TEST3D/GUI/0010300_page_active/cleanup.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 7
|
2015-01-23T15:19:22.000Z
|
2021-06-09T09:03:59.000Z
|
removefile('aa.log')
| 10.5
| 20
| 0.714286
|
4eff7dafc406c6584618fb03ccc49ee9b17f03dc
| 4,540
|
py
|
Python
|
QR-code-Genrator/main.py
|
dsrao711/Amazing-Python-Scripts
|
4a8bf7bc3d0c6f2c7838d1380c7f9ddbfce766b7
|
[
"MIT"
] | 1
|
2021-04-17T08:33:25.000Z
|
2021-04-17T08:33:25.000Z
|
QR-code-Genrator/main.py
|
dsrao711/Amazing-Python-Scripts
|
4a8bf7bc3d0c6f2c7838d1380c7f9ddbfce766b7
|
[
"MIT"
] | null | null | null |
QR-code-Genrator/main.py
|
dsrao711/Amazing-Python-Scripts
|
4a8bf7bc3d0c6f2c7838d1380c7f9ddbfce766b7
|
[
"MIT"
] | 1
|
2021-07-22T07:06:09.000Z
|
2021-07-22T07:06:09.000Z
|
from tkinter import *
# import os
import qrcode
from PIL import Image, ImageTk
from resizeimage import resizeimage
# QR Code Generator | Designed by Jay Gohel
class Qr_Genrator():
def __init__(self, root):
self.root=root
self.root.title("QR Code Generator")
self.root.geometry('900x500+200+50')
self.root.resizable(False, False)
        title = Label(self.root,text=" QR Code Generator", font=("time new roman",40), bg="#F96900", fg="white", anchor="w").place(x=0,y=0,relwidth=1)
# Variable
self.var_emp_code=StringVar()
self.var_name=StringVar()
self.var_department=StringVar()
self.var_designation=StringVar()
# Employee detail window design
emp_Frame=Frame(self.root,bd=2, relief=RIDGE,bg="white")
emp_Frame.place(x=50, y=100, width=500, height=380)
emp_title = Label(emp_Frame,text=" Employee Details", font=("goudy old style",20), bg="#FB9316", fg="white").place(x=0,y=0,relwidth=1)
lbl_emp_code = Label(emp_Frame,text=" Employee ID", font=("time new roman",15), bg="white").place(x=20,y=60)
lbl_emp_name = Label(emp_Frame,text=" Name", font=("time new roman",15), bg="white").place(x=20,y=100)
lbl_emp_dept = Label(emp_Frame,text=" Department", font=("time new roman",15), bg="white").place(x=20,y=140)
lbl_emp_designation = Label(emp_Frame,text=" Designation", font=("time new roman",15), bg="white").place(x=20,y=180)
text_emp_code = Entry(emp_Frame, font=("time new roman",15), textvariable=self.var_emp_code, bg="lightyellow").place(x=200,y=60)
text_emp_name = Entry(emp_Frame, font=("time new roman",15), textvariable=self.var_name, bg="lightyellow").place(x=200,y=100)
text_emp_dept = Entry(emp_Frame, font=("time new roman",15), textvariable=self.var_department, bg="lightyellow").place(x=200,y=140)
text_emp_designation = Entry(emp_Frame, font=("time new roman",15), textvariable=self.var_designation, bg="lightyellow").place(x=200,y=180)
        btn_genrator = Button(emp_Frame, text="QR Generator", command=self.genrate, font=("time new roman", 15, "bold"), bg="#2196f3", fg="white").place(x=90, y=250, width=180, height="30")
btn_clear = Button(emp_Frame, text="Clear", command=self.clear, font=("time new roman", 15, "bold"), bg="#2196f3", fg="white").place(x=290, y=250, width=120, height="30")
self.msg=""
self.lbl_msg = Label(emp_Frame, text=self.msg, font=("time new roman",15), bg="white", fg="green")
self.lbl_msg.place(x=0,y=320, relwidth=1)
# Qr Code window design
qr_Frame=Frame(self.root,bd=2, relief=RIDGE,bg="white")
qr_Frame.place(x=600, y=100, width=250, height=380)
emp_title = Label(qr_Frame,text="Employee QR code", font=("goudy old style",15), bg="#FB9316", fg="white").place(x=0,y=0,relwidth=1)
self.qr_code = Label(qr_Frame, text="No QR\n available", font=("time new roman",15), bg="#D76C02", fg="white", bd=1, relief=RIDGE)
self.qr_code.place(x=35, y=100, width=180, height=180)
def clear(self):
self.var_emp_code.set('')
self.var_name.set('')
self.var_department.set('')
self.var_designation.set('')
self.msg=""
self.lbl_msg.config(text=self.msg)
self.qr_code.config(image='')
def genrate(self):
if self.var_emp_code.get() == '' or self.var_name.get() == '' or self.var_department.get() == '' or self.var_designation.get() == '':
self.msg="All filed required !!!"
self.lbl_msg.config(text=self.msg, fg="red")
else:
qr_data=(f"Employee Id:{self.var_emp_code.get()}\nEmployee Name:{self.var_name.get()}\nDepartment:{self.var_department.get()}\nDesignation:{self.var_designation.get()}")
qr_code=qrcode.make(qr_data)
# print(qr_code)
qr_code=resizeimage.resize_cover(qr_code,[180,180])
qr_code.save('./QR-code-Genrator/employee_qr/emp_'+str(self.var_emp_code.get()+'.png'))
            # update the QR code image shown in the preview label
            self.im=ImageTk.PhotoImage(file='./QR-code-Genrator/employee_qr/emp_'+str(self.var_emp_code.get()+'.png'))
            self.qr_code.config(image=self.im)
            # update the status notification
            self.msg="QR generated successfully !!"
self.lbl_msg.config(text=self.msg, fg="green")
root = Tk()
obj = Qr_Genrator(root)
root.mainloop()
| 50.444444
| 188
| 0.627974
|
e16eab733ff9d39964f8785ec4e9d1d14366fc0b
| 10,720
|
py
|
Python
|
python/modelparsing/gem5.py
|
OleJohanBerg/riscv-custom-extension
|
53d63233bd3c9b7a01bea99916efea63f7ff677f
|
[
"BSD-3-Clause"
] | 2
|
2019-08-21T07:19:35.000Z
|
2021-11-12T05:40:02.000Z
|
python/modelparsing/gem5.py
|
OleJohanBerg/riscv-custom-extension
|
53d63233bd3c9b7a01bea99916efea63f7ff677f
|
[
"BSD-3-Clause"
] | null | null | null |
python/modelparsing/gem5.py
|
OleJohanBerg/riscv-custom-extension
|
53d63233bd3c9b7a01bea99916efea63f7ff677f
|
[
"BSD-3-Clause"
] | 1
|
2019-11-28T21:08:50.000Z
|
2019-11-28T21:08:50.000Z
|
# Copyright (c) 2018 TU Dresden
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Robert Scheffel
import logging
import os
import sys
from mako.template import Template
logger = logging.getLogger(__name__)
class Gem5:
'''
This class builds the code snippets, that are later integrated in the gem5
decoder. It builds a custom decoder depending on the previously parsed
models.
'''
def __init__(self, exts, regs):
self._exts = exts
self._regs = regs
self._decoder = ''
self._gem5_path = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../../..'))
self._gem5_arch_path = os.path.abspath(
os.path.join(
self._gem5_path,
'src/arch'))
self._gem5_ply_path = os.path.join(
self._gem5_path, 'ext/ply')
self._isa_decoder = os.path.abspath(
os.path.join(
self._gem5_arch_path,
'riscv/isa/decoder/rv32.isa'))
assert os.path.exists(self._isa_decoder)
self._buildpath = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../build'))
self._isamain = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../src/isa/main.isa'))
assert os.path.exists(self._isamain)
def restore(self):
'''
Remove the custom extensions from the isa decoder.
Restore the saved decoder.
'''
logger.info('Restore original ISA decoder.')
decoder_old = self._isa_decoder + '_old'
if os.path.exists(decoder_old):
logger.info('Restore contents from file {}'.format(decoder_old))
with open(decoder_old, 'r') as fh:
content = fh.read()
with open(self._isa_decoder, 'w') as fh:
fh.write(content)
logger.info('Original decoder restored')
try:
logger.info('Remove {} from system'.format(decoder_old))
os.remove(decoder_old)
except OSError:
pass
else:
logger.info('Nothing to do')
def extend_gem5(self):
'''
Calls the functions to generate a custom decoder and
patch the necessary files in gem5.
'''
# first: decoder related stuff
self.gen_decoder()
self.gen_cxx_files()
self.create_regsintr()
# self.patch_decoder()
# second: create timings for functional units
self.create_FU_timings()
def gen_decoder(self):
assert os.path.exists(self._buildpath)
assert os.path.exists(self._gem5_arch_path)
# iterate of all custom extensions and generate a custom decoder
# first sort models:
# opcode > funct3 (> funct7)
logger.info('Generate custom decoder from models.')
# sort models
self._exts.models.sort(key=lambda x: (x.opc, x.funct3, x.funct7))
dec_templ = Template(r"""<%
dfn = {}
for model in models:
if model.opc in dfn:
dfn[model.opc].append(model)
else:
dfn[model.opc] = [model]
for opc, mdls in dfn.items():
funct3 = {}
for mdl in mdls:
if mdl.form == 'I':
funct3[mdl.funct3] = mdl
else:
if mdl.funct3 in funct3:
funct3[mdl.funct3].append(mdl)
else:
funct3[mdl.funct3] = [mdl]
dfn[opc] = funct3
%>\
// === AUTO GENERATED FILE ===
% if dfn.items():
decode OPCODE default Unknown::unknown() {
% for opc,funct3_dict in dfn.items():
${hex(opc)}: decode FUNCT3 {
% for funct3, val in funct3_dict.items():
% if type(val) != list:
${hex(funct3)}: I32Op::${val.name}({${val.definition}}, uint32_t, IntCustOp);
% else:
${hex(funct3)}: decode FUNCT7 {
% for mdl in val:
${hex(mdl.funct7)}: R32Op::${mdl.name}({${mdl.definition}}, IntCustOp);
% endfor
}
% endif
% endfor
}
% endfor
}
% else:
decode OPCODE {
default: Unknown::unknown();
}
% endif
""")
self._decoder = dec_templ.render(models=self._exts.models)
logger.debug('custom decoder: \n' + self._decoder)
def gen_cxx_files(self):
# now generate the cxx files using the isa parser
isabuildpath = os.path.join(self._buildpath, 'isa')
if not os.path.exists(isabuildpath):
os.makedirs(isabuildpath)
isafile = os.path.join(isabuildpath, 'custom.isa')
with open(isafile, 'w') as fh:
fh.write(self._decoder)
# create a builddir
gen_build_dir = os.path.join(self._buildpath, 'generated')
if not os.path.exists(gen_build_dir):
os.makedirs(gen_build_dir)
# add some paths to call the gem5 isa parser
sys.path[0:0] = [self._gem5_arch_path]
sys.path[0:0] = [self._gem5_ply_path]
sys.path[0:0] = [os.path.join(self._gem5_path, 'src/python')]
import isa_parser
logger.info('Let gem5 isa_parser generate decoder files')
parser = isa_parser.ISAParser(gen_build_dir)
parser.parse_isa_desc(self._isamain)
def patch_decoder(self):
# patch the gem5 isa decoder
dec_templ = Template(r"""<%
dfn = {}
for model in models:
if model.opc in dfn:
dfn[model.opc].append(model)
else:
dfn[model.opc] = [model]
for opc, mdls in dfn.items():
funct3 = {}
for mdl in mdls:
if mdl.form == 'I':
funct3[mdl.funct3] = mdl
else:
if mdl.funct3 in funct3:
funct3[mdl.funct3].append(mdl)
else:
funct3[mdl.funct3] = [mdl]
dfn[opc] = funct3
%>\
% for opc,funct3_dict in dfn.items():
${hex(opc)}: decode FUNCT3 {
% for funct3, val in funct3_dict.items():
% if type(val) != list:
${hex(funct3)}: I32Op::${val.name}({${val.definition}}, uint32_t, IntCustOp);
% else:
${hex(funct3)}: decode FUNCT7 {
% for mdl in val:
${hex(mdl.funct7)}: R32Op::${mdl.name}({${mdl.definition}}, IntCustOp);
% endfor
}
% endif
% endfor
}
% endfor""")
decoder_patch = dec_templ.render(models=self._exts.models)
# for now: always choose rv32.isa
logger.info("Patch the gem5 isa file " + self._isa_decoder)
with open(self._isa_decoder, 'r') as fh:
content = fh.readlines()
# if not existing
# copy the old .isa file
gem5_isa_old = self._isa_decoder + '_old'
if not os.path.exists(gem5_isa_old):
logger.info('Copy original {}'.format(self._isa_decoder))
with open(gem5_isa_old, 'w') as fh:
data = ''.join(content)
fh.write(data)
line = len(content) - 2
content.insert(line, decoder_patch)
# write back modified content
with open(self._isa_decoder, 'w') as fh:
content = ''.join(content)
fh.write(content)
def create_FU_timings(self):
'''
Retrieve the cycle count information from the models.
Together with the mask and match value, create a timing for
every custom instruction.
'''
assert os.path.exists(self._buildpath)
logger.info("Create custom timing file for Minor CPU.")
timing_templ = Template(r"""<%
%>\
# === AUTO GENERATED FILE ===
from m5.objects import *
% for inst in insts:
class MinorFUTiming${inst.name.title()}(MinorFUTiming):
description = 'Custom${inst.name.title()}'
match = ${hex(inst.matchvalue)}
mask = ${hex(inst.maskvalue)}
srcRegsRelativeLats = [2]
extraCommitLat = ${inst.cycles - 1}
% endfor
custom_timings = [
% for inst in insts:
MinorFUTiming${inst.name.title()}(),
% endfor
]
""")
_FUtimings = timing_templ.render(insts=self._exts.instructions)
pythonbuildpath = os.path.join(self._buildpath, 'python')
if not os.path.exists(pythonbuildpath):
os.makedirs(pythonbuildpath)
timingfile = os.path.join(pythonbuildpath, 'minor_custom_timings.py')
with open(timingfile, 'w') as fh:
fh.write(_FUtimings)
def create_regsintr(self):
'''
A file is needed, that provides functions for accessing
custom registers within the execute function of the
gem5 decoded instruction.
'''
intr_templ = Template(r"""<%
%>\
// === AUTO GENERATED FILE ===
#include <stdint.h>
% for reg, addr in regmap.items():
#define ${reg} ${hex(addr)}
% endfor
#define READ_CUSTOM_REG(reg) \
({uint32_t val; \
val = xc->readMiscReg(reg); \
val;})
#define WRITE_CUSTOM_REG(reg, val) \
(xc->setMiscReg(reg,val))
""")
intr = intr_templ.render(regmap=self._regs.regmap)
genpath = os.path.join(self._buildpath, 'generated')
if not os.path.exists(genpath):
os.makedirs(genpath)
intrfile = os.path.join(genpath, 'regsintr.hh')
with open(intrfile, 'w') as fh:
fh.write(intr)
@property
def decoder(self):
return self._decoder
@property
def extensions(self):
return self._exts
@property
def regs(self):
return self._regs
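# Sketch of the decoder snippet the Mako template above renders for a single
# hypothetical R-type model (opcode 0x0b, funct3 0x0, funct7 0x0; the model name
# and definition are placeholders):
#
#   decode OPCODE default Unknown::unknown() {
#       0xb: decode FUNCT3 {
#           0x0: decode FUNCT7 {
#               0x0: R32Op::mymodel({...}, IntCustOp);
#           }
#       }
#   }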
| 30.197183
| 78
| 0.61903
|
5d716c52ab99682825fe3e3a37dcf9a558cc3543
| 17,362
|
py
|
Python
|
bodyfetcher.py
|
Draakonraev/SmokeDetector
|
a1a1f769eb309d71c6d4869a7a1ddf2220389d6c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
bodyfetcher.py
|
Draakonraev/SmokeDetector
|
a1a1f769eb309d71c6d4869a7a1ddf2220389d6c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
bodyfetcher.py
|
Draakonraev/SmokeDetector
|
a1a1f769eb309d71c6d4869a7a1ddf2220389d6c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# coding=utf-8
from spamhandling import handle_spam, check_if_spam
from datahandling import (add_or_update_api_data, clear_api_data, store_bodyfetcher_queue, store_bodyfetcher_max_ids,
store_queue_timings)
from chatcommunicate import tell_rooms_with
from globalvars import GlobalVars
from operator import itemgetter
from datetime import datetime
import json
import time
import threading
import requests
import copy
from classes import Post, PostParseError
from helpers import log
from itertools import chain
# noinspection PyClassHasNoInit,PyBroadException
class BodyFetcher:
queue = {}
previous_max_ids = {}
queue_timings = {}
special_cases = {
"pt.stackoverflow.com": 10,
"ru.stackoverflow.com": 10,
"blender.stackexchange.com": 5,
"codereview.stackexchange.com": 5,
"es.stackoverflow.com": 5,
"stackoverflow.com": 3,
"stats.stackexchange.com": 5,
"tex.stackexchange.com": 5,
"magento.stackexchange.com": 3,
"gis.stackexchange.com": 3
}
time_sensitive = ["security.stackexchange.com", "movies.stackexchange.com",
"mathoverflow.net", "gaming.stackexchange.com", "webmasters.stackexchange.com",
"arduino.stackexchange.com", "workplace.stackexchange.com"]
threshold = 1
last_activity_date = 0
api_data_lock = threading.Lock()
queue_modify_lock = threading.Lock()
max_ids_modify_lock = threading.Lock()
queue_timing_modify_lock = threading.Lock()
def add_to_queue(self, post, should_check_site=False):
try:
d = json.loads(json.loads(post)["data"])
except ValueError:
# post didn't contain a valid JSON object in its ["data"] member
# indicative of a server-side socket reset
return
site_base = d["siteBaseHostAddress"]
post_id = d["id"]
if (post_id == 3122 or post_id == 51812) and site_base == "meta.stackexchange.com":
return # don't check meta sandbox, it's full of weird posts
with self.queue_modify_lock:
if site_base not in self.queue:
self.queue[site_base] = {}
# Something about how the queue is being filled is storing Post IDs in a list.
            # So, if we get here we need to make sure that the correct types are passed.
#
# If the item in self.queue[site_base] is a dict, do nothing.
# If the item in self.queue[site_base] is not a dict but is a list or a tuple, then convert to dict and
# then replace the list or tuple with the dict.
# If the item in self.queue[site_base] is neither a dict or a list, then explode.
if type(self.queue[site_base]) is dict:
pass
elif type(self.queue[site_base]) is not dict and type(self.queue[site_base]) in [list, tuple]:
post_list_dict = {}
for post_list_id in self.queue[site_base]:
post_list_dict[post_list_id] = None
self.queue[site_base] = post_list_dict
else:
raise TypeError("A non-iterable is in the queue item for a given site, this will cause errors!")
# This line only works if we are using a dict in the self.queue[site_base] object, which we should be with
# the previous conversion code.
self.queue[site_base][str(post_id)] = datetime.utcnow()
if GlobalVars.flovis is not None:
GlobalVars.flovis.stage('bodyfetcher/enqueued', site_base, post_id,
{sk: list(sq.keys()) for sk, sq in self.queue.items()})
if should_check_site:
self.make_api_call_for_site(site_base)
else:
self.check_queue()
return
def check_queue(self):
for site, values in self.queue.items():
if site in self.special_cases:
if len(values) >= self.special_cases[site]:
self.make_api_call_for_site(site)
return
if site in self.time_sensitive:
if len(values) >= 1 and datetime.utcnow().hour in range(4, 12):
self.make_api_call_for_site(site)
return
# if we don't have any sites with their queue filled, take the first one without a special case
for site, values in self.queue.items():
if site not in self.special_cases and len(values) >= self.threshold:
self.make_api_call_for_site(site)
return
# We're not making an API request, so explicitly store the queue
with self.queue_modify_lock:
store_bodyfetcher_queue()
def print_queue(self):
return '\n'.join(["{0}: {1}".format(key, str(len(values))) for (key, values) in self.queue.items()])
def make_api_call_for_site(self, site):
if site not in self.queue:
return
with self.queue_modify_lock:
new_posts = self.queue.pop(site)
store_bodyfetcher_queue()
new_post_ids = [int(k) for k in new_posts.keys()]
if GlobalVars.flovis is not None:
for post_id in new_post_ids:
GlobalVars.flovis.stage('bodyfetcher/api_request', site, post_id,
{'site': site, 'posts': list(new_posts.keys())})
with self.queue_timing_modify_lock:
post_add_times = [v for k, v in new_posts.items()]
pop_time = datetime.utcnow()
for add_time in post_add_times:
try:
seconds_in_queue = (pop_time - add_time).total_seconds()
if site in self.queue_timings:
self.queue_timings[site].append(seconds_in_queue)
else:
self.queue_timings[site] = [seconds_in_queue]
except KeyError: # XXX: Any other possible exception?
continue # Skip to next item if we've got invalid data or missing values.
store_queue_timings()
with self.max_ids_modify_lock:
if site in self.previous_max_ids and max(new_post_ids) > self.previous_max_ids[site]:
previous_max_id = self.previous_max_ids[site]
intermediate_posts = range(previous_max_id + 1, max(new_post_ids))
# We don't want to go over the 100-post API cutoff, so take the last
# (100-len(new_post_ids)) from intermediate_posts
intermediate_posts = intermediate_posts[(100 - len(new_post_ids)):]
# new_post_ids could contain edited posts, so merge it back in
combined = chain(intermediate_posts, new_post_ids)
# Could be duplicates, so uniquify
posts = list(set(combined))
else:
posts = new_post_ids
try:
if max(new_post_ids) > self.previous_max_ids[site]:
self.previous_max_ids[site] = max(new_post_ids)
store_bodyfetcher_max_ids()
except KeyError:
self.previous_max_ids[site] = max(new_post_ids)
store_bodyfetcher_max_ids()
log('debug', "New IDs / Hybrid Intermediate IDs for {}:".format(site))
if len(new_post_ids) > 30:
log('debug', "{} +{} more".format(sorted(new_post_ids)[:30], len(new_post_ids) - 30))
else:
log('debug', sorted(new_post_ids))
if len(new_post_ids) == len(posts):
log('debug', "[ *Identical* ]")
elif len(posts) > 30:
log('debug', "{} +{} more".format(sorted(posts)[:30], len(posts) - 30))
else:
log('debug', sorted(posts))
question_modifier = ""
pagesize_modifier = {}
if site == "stackoverflow.com":
# Not all SO questions are shown in the realtime feed. We now
# fetch all recently modified SO questions to work around that.
if self.last_activity_date != 0:
pagesize = "50"
else:
pagesize = "25"
pagesize_modifier = {
'pagesize': pagesize,
'min': str(self.last_activity_date)
}
else:
question_modifier = "/{0}".format(";".join([str(post) for post in posts]))
url = "https://api.stackexchange.com/2.2/questions{}".format(question_modifier)
params = {
'filter': '!*xq08dCDNr)PlxxXfaN8ntivx(BPlY_8XASyXLX-J7F-)VK*Q3KTJVkvp*',
'key': 'IAkbitmze4B8KpacUfLqkw((',
'site': site
}
params.update(pagesize_modifier)
# wait to make sure API has/updates post data
time.sleep(3)
with GlobalVars.api_request_lock:
# Respect backoff, if we were given one
if GlobalVars.api_backoff_time > time.time():
time.sleep(GlobalVars.api_backoff_time - time.time() + 2)
try:
time_request_made = datetime.utcnow().strftime('%H:%M:%S')
response = requests.get(url, params=params, timeout=20).json()
except (requests.exceptions.Timeout, requests.ConnectionError, Exception):
# Any failure in the request being made (timeout or otherwise) should be added back to
# the queue.
with self.queue_modify_lock:
if site in self.queue:
self.queue[site].update(new_posts)
else:
self.queue[site] = new_posts
return
with self.api_data_lock:
add_or_update_api_data(site)
message_hq = ""
with GlobalVars.apiquota_rw_lock:
if "quota_remaining" in response:
if response["quota_remaining"] - GlobalVars.apiquota >= 5000 and GlobalVars.apiquota >= 0:
tell_rooms_with("debug", "API quota rolled over with {0} requests remaining. "
"Current quota: {1}.".format(GlobalVars.apiquota,
response["quota_remaining"]))
sorted_calls_per_site = sorted(GlobalVars.api_calls_per_site.items(), key=itemgetter(1),
reverse=True)
api_quota_used_per_site = ""
for site_name, quota_used in sorted_calls_per_site:
sanatized_site_name = site_name.replace('.com', '').replace('.stackexchange', '')
api_quota_used_per_site += sanatized_site_name + ": {0}\n".format(str(quota_used))
api_quota_used_per_site = api_quota_used_per_site.strip()
tell_rooms_with("debug", api_quota_used_per_site)
clear_api_data()
if response["quota_remaining"] == 0:
tell_rooms_with("debug", "API reports no quota left! May be a glitch.")
tell_rooms_with("debug", str(response)) # No code format for now?
if GlobalVars.apiquota == -1:
tell_rooms_with("debug", "Restart: API quota is {quota}."
.format(quota=response["quota_remaining"]))
GlobalVars.apiquota = response["quota_remaining"]
else:
message_hq = "The quota_remaining property was not in the API response."
if "error_message" in response:
message_hq += " Error: {} at {} UTC.".format(response["error_message"], time_request_made)
if "error_id" in response and response["error_id"] == 502:
if GlobalVars.api_backoff_time < time.time() + 12: # Add a backoff of 10 + 2 seconds as a default
GlobalVars.api_backoff_time = time.time() + 12
message_hq += " Backing off on requests for the next 12 seconds."
message_hq += " Previous URL: `{}`".format(url)
if "backoff" in response:
if GlobalVars.api_backoff_time < time.time() + response["backoff"]:
GlobalVars.api_backoff_time = time.time() + response["backoff"]
if len(message_hq) > 0 and "site is required" not in message_hq:
tell_rooms_with("debug", message_hq.strip())
if "items" not in response:
return
if site == "stackoverflow.com":
items = response["items"]
if len(items) > 0 and "last_activity_date" in items[0]:
self.last_activity_date = items[0]["last_activity_date"]
num_scanned = 0
start_time = time.time()
for post in response["items"]:
pnb = copy.deepcopy(post)
if 'body' in pnb:
pnb['body'] = 'Present, but truncated'
if 'answers' in pnb:
del pnb['answers']
if "title" not in post or "body" not in post:
if GlobalVars.flovis is not None and 'question_id' in post:
GlobalVars.flovis.stage('bodyfetcher/api_response/no_content', site, post['question_id'], pnb)
continue
post['site'] = site
try:
post['edited'] = (post['creation_date'] != post['last_edit_date'])
except KeyError:
post['edited'] = False # last_edit_date not present = not edited
try:
post_ = Post(api_response=post)
except PostParseError as err:
log('error', 'Error {0} when parsing post: {1!r}'.format(err, pnb))  # post_ is not assigned when parsing fails, so log the truncated copy instead
if GlobalVars.flovis is not None and 'question_id' in post:
GlobalVars.flovis.stage('bodyfetcher/api_response/error', site, post['question_id'], pnb)
continue
num_scanned += 1
is_spam, reason, why = check_if_spam(post_)
if is_spam:
try:
if GlobalVars.flovis is not None and 'question_id' in post:
GlobalVars.flovis.stage('bodyfetcher/api_response/spam', site, post['question_id'],
{'post': pnb, 'check_if_spam': [is_spam, reason, why]})
handle_spam(post=post_,
reasons=reason,
why=why)
except Exception as e:
log('error', "Exception in handle_spam:", e)
elif GlobalVars.flovis is not None and 'question_id' in post:
GlobalVars.flovis.stage('bodyfetcher/api_response/not_spam', site, post['question_id'],
{'post': pnb, 'check_if_spam': [is_spam, reason, why]})
try:
if "answers" not in post:
pass
else:
for answer in post["answers"]:
anb = copy.deepcopy(answer)
if 'body' in anb:
anb['body'] = 'Present, but truncated'
num_scanned += 1
answer["IsAnswer"] = True  # Necessary for Post object
answer["title"] = "" # Necessary for proper Post object creation
answer["site"] = site # Necessary for proper Post object creation
try:
answer['edited'] = (answer['creation_date'] != answer['last_edit_date'])
except KeyError:
answer['edited'] = False # last_edit_date not present = not edited
answer_ = Post(api_response=answer, parent=post_)
is_spam, reason, why = check_if_spam(answer_)
if is_spam:
try:
if GlobalVars.flovis is not None and 'answer_id' in answer:
GlobalVars.flovis.stage('bodyfetcher/api_response/spam', site, answer['answer_id'],
{'post': anb, 'check_if_spam': [is_spam, reason, why]})
handle_spam(answer_,
reasons=reason,
why=why)
except Exception as e:
log('error', "Exception in handle_spam:", e)
elif GlobalVars.flovis is not None and 'answer_id' in answer:
GlobalVars.flovis.stage('bodyfetcher/api_response/not_spam', site, answer['answer_id'],
{'post': anb, 'check_if_spam': [is_spam, reason, why]})
except Exception as e:
log('error', "Exception handling answers:", e)
end_time = time.time()
scan_time = end_time - start_time
GlobalVars.PostScanStat.add_stat(num_scanned, scan_time)
return
| 45.810026
| 119
| 0.548439
|
2e060fa9fb4b208dccd6c75dfc83d52e9f5aad4e
| 3,943
|
py
|
Python
|
radnlp/split.py
|
chapmanbe/RadNLP
|
7d27b53a75ed7c4498a649c94d2eed11f3024054
|
[
"Apache-2.0"
] | 10
|
2015-08-17T14:51:36.000Z
|
2021-04-27T03:49:23.000Z
|
radnlp/split.py
|
chapmanbe/RadNLP
|
7d27b53a75ed7c4498a649c94d2eed11f3024054
|
[
"Apache-2.0"
] | null | null | null |
radnlp/split.py
|
chapmanbe/RadNLP
|
7d27b53a75ed7c4498a649c94d2eed11f3024054
|
[
"Apache-2.0"
] | 6
|
2016-12-02T22:06:53.000Z
|
2020-08-31T14:33:34.000Z
|
"""
Tools for splitting report and recognizing sections
"""
import re
from collections import OrderedDict
from textblob import TextBlob
r_headings = re.compile(r"""(?P<heading>([A-Z ]+\s)?[A-Z()]+:)""")
r_digits = re.compile(r"""\d\.""")
r_enumerate = re.compile(r"""((\d(.|:|\))\s)(.+)(?=\n))""")
canned_phrases = ("if you have any question about this report, contact me at:",
"i have personally reviewed the images and agree",
"i have personally reviewed the images for this examination",
"please utilize the following guidance for the management of these patients",
"please utilize the following guidance for management",
"i have reviewed the images and approve this final report",
"the imaging exam has been reviewed and the report has been issued by a radiologist physician",
)
def terminate_lists(txt):
"""
Replace enumerated lists that don't end with a period/question mark
"""
lists = r_enumerate.findall(txt)
for l in lists:
txt = txt.replace(l[0],"%s."%l[0])
return txt
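# Illustrative example for terminate_lists (hypothetical report text):
#   "1. No acute disease\n" becomes "1. No acute disease.\n", so downstream
#   sentence splitting treats each enumerated item as a complete sentence.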
def get_headings(txt):
"""
Return all section headings (e.g. "IMPRESSION:") found in txt.
"""
global r_headings
return [r.group("heading").strip() for r in r_headings.finditer(txt)]
def find_terminating_sentences(txt,phrases=None):
"""
Return the start index of the earliest terminating phrase found in txt, or None if none is present.
"""
try:
if not phrases:
phrases = ()
indices = [i for i in [txt.lower().find(p) for p in phrases] if i != -1]
return min(indices)
except:
return None
def get_report(txt,terminating_phrases= None):
"""
get report text up to any terminating_phrases
"""
if not terminating_phrases:
terminating_phrases = canned_phrases
ti = find_terminating_sentences(txt,terminating_phrases)
global r_digits
if ti is not None:  # find_terminating_sentences returns None when no phrase is found
return r_digits.sub("",txt[:ti])
else:
return r_digits.sub("",txt)
def preprocess_report(txt,
fix_enumerated_lists=True,
drop_boiler_plate=True,
canned_phrases=None):
"""
from a string (txt) containing a report, return the relevant
portions
fix_enumerated_lists: boolean.
If true, use regular expressions to transform enumerated lists into more sentence like structures.
drop_boiler_plate: boolean.
If true, only return text preceding canned_phrases
canned_phrases: None or list.
List of canned phrases to exclude as boilerplate
"""
if fix_enumerated_lists:
txt = terminate_lists(txt)
if drop_boiler_plate:
txt = get_report(txt,terminating_phrases=canned_phrases)
return txt
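# Minimal sketch of preprocess_report (hypothetical report text), assuming the default canned_phrases:
#   raw = "FINDINGS: 1. No acute disease\nI have personally reviewed the images and agree\n"
#   preprocess_report(raw)  ->  "FINDINGS:  No acute disease.\n"
# (the list item gains a period, then the numbering and the boiler-plate tail are dropped)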
def grab_impression(txt):
"""
grab impression from the text by looking for IMPRESSION:
"""
try:
return txt[txt.index("IMPRESSION:"):].split("IMPRESSION:")[1]
except Exception as error:
print(error)
return ""
# Note: this helper appears unfinished (it pops one heading and returns nothing);
# the actual splitting into sections is done by get_sections_by_headings below.
def get_sections(txt, headings):
h = headings.pop()
def get_section(txt,heading):
try:
loc = txt.index(heading)
return txt[:loc], txt[loc:].split(heading)[1].strip()
except Exception as error:
#print(error, heading, txt)
return txt, ""
def get_sections_by_headings(txt, sections, headings):
if not headings:
return txt, sections
else:
h = headings.pop()
txt, sections[h] = get_section(txt, h)
return get_sections_by_headings(txt, sections, headings)
def reverse_sections(secs):
_secs = OrderedDict()
while secs:
sec,txt = secs.popitem()
_secs[sec] = txt
return _secs
def split_report(txt):
headings = get_headings(txt)
txt, secs = get_sections_by_headings(txt, OrderedDict(), headings)
return reverse_sections(secs)
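# Illustrative example (hypothetical report text):
#   split_report("FINDINGS: clear lungs IMPRESSION: normal")
#   -> OrderedDict([("FINDINGS:", "clear lungs"), ("IMPRESSION:", "normal")])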
def get_sentences(report):
"""
Wrapper around TextBlob
generates a TextBlob instance from report and
returns the sentences
"""
return [s.raw for s in TextBlob(report).sentences]
| 29.207407
| 112
| 0.632767
|
941069669f890523139642f64c081ec9cc0b5711
| 1,163
|
py
|
Python
|
tests/permifrost_test_utils/snowflake_connector.py
|
kouk/permifrost
|
713aee06c287ba128032c03eead3469a79d90560
|
[
"MIT"
] | null | null | null |
tests/permifrost_test_utils/snowflake_connector.py
|
kouk/permifrost
|
713aee06c287ba128032c03eead3469a79d90560
|
[
"MIT"
] | null | null | null |
tests/permifrost_test_utils/snowflake_connector.py
|
kouk/permifrost
|
713aee06c287ba128032c03eead3469a79d90560
|
[
"MIT"
] | null | null | null |
from typing import Dict, List, Any
class MockSnowflakeConnector:
def show_databases(self) -> List[str]:
return []
def show_warehouses(self) -> List[str]:
return []
def show_roles(self) -> Dict[str, str]:
return {}
def show_users(self) -> List[str]:
return []
def show_schemas(self, database: str = None) -> List[str]:
return []
def show_tables(self, database: str = None, schema: str = None) -> List[str]:
return []
def show_views(self, database: str = None, schema: str = None) -> List[str]:
return []
def show_future_grants(self, database: str = None, schema: str = None) -> List[str]:
return []
def show_grants_to_role(self, role) -> Dict[str, Any]:
return {}
def show_grants_to_role_with_grant_option(self, role) -> Dict[str, Any]:
return {}
def show_roles_granted_to_user(self, user) -> List[str]:
return []
def get_current_user(self) -> str:
return ""
def get_current_role(self) -> str:
return "securityadmin"
def full_schema_list(self, schema: str) -> List[str]:
return []
| 25.282609
| 88
| 0.600172
|
c18fbac0413798c0a6e8f510b382979a64a63ffa
| 3,268
|
py
|
Python
|
tests/storages_tests/rdb_tests/test_with_server.py
|
Jeyhooon/optuna
|
0a5560cd0c8e83fe03f63ab431a513bf893f7d4d
|
[
"MIT"
] | 1
|
2019-05-28T07:29:49.000Z
|
2019-05-28T07:29:49.000Z
|
tests/storages_tests/rdb_tests/test_with_server.py
|
nabenabe0928/optuna
|
aa505125de8515518fe19ba227edf7a1d3f8ebda
|
[
"MIT"
] | null | null | null |
tests/storages_tests/rdb_tests/test_with_server.py
|
nabenabe0928/optuna
|
aa505125de8515518fe19ba227edf7a1d3f8ebda
|
[
"MIT"
] | 2
|
2020-03-03T00:40:28.000Z
|
2021-01-28T11:54:32.000Z
|
from multiprocessing import Pool
import os
from typing import Sequence
from typing import Tuple
import numpy as np
import pytest
import optuna
_STUDY_NAME = "_test_multiprocess"
def f(x: float, y: float) -> float:
return (x - 3) ** 2 + y
def objective(trial: optuna.Trial) -> float:
x = trial.suggest_float("x", -10, 10)
y = trial.suggest_float("y", -10, 10)
trial.report(x, 0)
trial.report(y, 1)
trial.set_user_attr("x", x)
trial.set_system_attr("y", y)
return f(x, y)
def run_optimize(args: Tuple[str, str]) -> None:
study_name = args[0]
storage_url = args[1]
# Create a study
study = optuna.create_study(study_name=study_name, storage=storage_url, load_if_exists=True)
# Run optimization
study.optimize(objective, n_trials=20)
@pytest.fixture
def storage_url() -> str:
if "TEST_DB_URL" not in os.environ:
pytest.skip("This test requires TEST_DB_URL.")
storage_url = os.environ["TEST_DB_URL"]
try:
optuna.study.delete_study(_STUDY_NAME, storage_url)
except KeyError:
pass
return storage_url
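# Example invocation (hypothetical URL; any RDB URL accepted by SQLAlchemy should work):
#   TEST_DB_URL="mysql+pymysql://user:pass@localhost/optuna_test" pytest tests/storages_tests/rdb_tests/test_with_server.py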
def _check_trials(trials: Sequence[optuna.trial.FrozenTrial]) -> None:
# Check trial states.
assert all(trial.state == optuna.trial.TrialState.COMPLETE for trial in trials)
# Check trial values and params.
assert all("x" in trial.params for trial in trials)
assert all("y" in trial.params for trial in trials)
assert all(
np.isclose(
[trial.value for trial in trials],
[f(trial.params["x"], trial.params["y"]) for trial in trials],
atol=1e-4,
)
)
# Check intermediate values.
assert all(len(trial.intermediate_values) == 2 for trial in trials)
assert all(trial.params["x"] == trial.intermediate_values[0] for trial in trials)
assert all(trial.params["y"] == trial.intermediate_values[1] for trial in trials)
# Check attrs.
assert all(
np.isclose(
[trial.user_attrs["x"] for trial in trials],
[trial.params["x"] for trial in trials],
atol=1e-4,
)
)
assert all(
np.isclose(
[trial.system_attrs["y"] for trial in trials],
[trial.params["y"] for trial in trials],
atol=1e-4,
)
)
def test_loaded_trials(storage_url: str) -> None:
# This test is placed before the multi-process tests so that it creates the database tables first.
N_TRIALS = 20
study = optuna.create_study(study_name=_STUDY_NAME, storage=storage_url)
# Run optimization
study.optimize(objective, n_trials=N_TRIALS)
trials = study.trials
assert len(trials) == N_TRIALS
_check_trials(trials)
# Create a new study to confirm the study can load trial properly.
loaded_study = optuna.load_study(study_name=_STUDY_NAME, storage=storage_url)
_check_trials(loaded_study.trials)
def test_multiprocess(storage_url: str) -> None:
n_workers = 8
study_name = _STUDY_NAME
with Pool(n_workers) as pool:
pool.map(run_optimize, [(study_name, storage_url)] * n_workers)
study = optuna.load_study(study_name=study_name, storage=storage_url)
trials = study.trials
assert len(trials) == n_workers * 20
_check_trials(trials)
| 28.172414
| 96
| 0.662791
|
d526a43cb308fbf6437ad3f2d89855cc791a7b27
| 2,017
|
py
|
Python
|
deepchem/feat/tests/test_one_hot_featurizer.py
|
cjgalvin/deepchem
|
64993a129e7f0f78fed9500298b1828ac8a0757a
|
[
"MIT"
] | 3
|
2019-05-29T19:18:25.000Z
|
2021-01-25T05:44:05.000Z
|
deepchem/feat/tests/test_one_hot_featurizer.py
|
cjgalvin/deepchem
|
64993a129e7f0f78fed9500298b1828ac8a0757a
|
[
"MIT"
] | 10
|
2017-02-23T19:39:22.000Z
|
2017-08-31T22:21:18.000Z
|
deepchem/feat/tests/test_one_hot_featurizer.py
|
cjgalvin/deepchem
|
64993a129e7f0f78fed9500298b1828ac8a0757a
|
[
"MIT"
] | 2
|
2019-11-20T03:21:14.000Z
|
2020-03-21T17:26:33.000Z
|
import unittest
import numpy as np
from deepchem.feat import OneHotFeaturizer
from deepchem.feat.molecule_featurizers.one_hot_featurizer import ZINC_CHARSET
class TestOneHotFeaturizer(unittest.TestCase):
"""
Test OneHotFeaturizer.
"""
def test_onehot_featurizer(self):
"""
Test simple one hot encoding.
"""
from rdkit import Chem
length = len(ZINC_CHARSET) + 1
smiles = 'CC(=O)Oc1ccccc1C(=O)O'
mol = Chem.MolFromSmiles(smiles)
featurizer = OneHotFeaturizer()
feature = featurizer([mol])
assert feature.shape == (1, 100, length)
# untransform
undo_smiles = featurizer.untransform(feature[0])
assert smiles == undo_smiles
def test_onehot_featurizer_with_max_length(self):
"""
Test one hot encoding with max_length.
"""
from rdkit import Chem
length = len(ZINC_CHARSET) + 1
smiles = 'CC(=O)Oc1ccccc1C(=O)O'
mol = Chem.MolFromSmiles(smiles)
featurizer = OneHotFeaturizer(max_length=120)
feature = featurizer([mol])
assert feature.shape == (1, 120, length)
# untransform
undo_smiles = featurizer.untransform(feature[0])
assert smiles == undo_smiles
def test_correct_transformation(self):
"""
Test correct one hot encoding.
"""
from rdkit import Chem
charset = ['C', 'N', '=', ')', '(', 'O']
smiles = 'CN=C=O'
mol = Chem.MolFromSmiles(smiles)
featurizer = OneHotFeaturizer(charset=charset, max_length=100)
feature = featurizer([mol])
assert np.allclose(feature[0][0], np.array([1, 0, 0, 0, 0, 0, 0]))
assert np.allclose(feature[0][1], np.array([0, 1, 0, 0, 0, 0, 0]))
assert np.allclose(feature[0][2], np.array([0, 0, 1, 0, 0, 0, 0]))
assert np.allclose(feature[0][3], np.array([1, 0, 0, 0, 0, 0, 0]))
assert np.allclose(feature[0][4], np.array([0, 0, 1, 0, 0, 0, 0]))
assert np.allclose(feature[0][5], np.array([0, 0, 0, 0, 0, 1, 0]))
# untransform
undo_smiles = featurizer.untransform(feature[0])
assert smiles == undo_smiles
| 31.030769
| 78
| 0.653446
|
6a71757ab10c02ed703536b4da39a7a31ae4d827
| 536
|
py
|
Python
|
photos/urls.py
|
NDOLIC/Gallery
|
697719df6f49b1220ff8ca17a65d757553d7421a
|
[
"MIT"
] | null | null | null |
photos/urls.py
|
NDOLIC/Gallery
|
697719df6f49b1220ff8ca17a65d757553d7421a
|
[
"MIT"
] | null | null | null |
photos/urls.py
|
NDOLIC/Gallery
|
697719df6f49b1220ff8ca17a65d757553d7421a
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns=[
# url('^$',views.photos_of_day,name = 'welcome'),
url('^$',views.photos_of_day,name='photosToday'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^location/(\d+)', views.location, name='location'),
url(r'^share/(\d+)', views.share, name='share')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 35.733333
| 81
| 0.70709
|
89f32f93a32459c89f13285a3cc96e46bba81509
| 8,110
|
py
|
Python
|
contrib/devtools/update-translations.py
|
coiner7/whatcoin
|
a99fbb704ef0b370da4896256bacd04def1a6d0c
|
[
"MIT"
] | 10
|
2021-07-04T03:27:19.000Z
|
2021-10-02T07:41:50.000Z
|
contrib/devtools/update-translations.py
|
coiner7/whatcoin
|
a99fbb704ef0b370da4896256bacd04def1a6d0c
|
[
"MIT"
] | 2
|
2021-08-07T10:09:49.000Z
|
2021-10-01T04:45:07.000Z
|
contrib/devtools/update-translations.py
|
coiner7/whatcoin
|
a99fbb704ef0b370da4896256bacd04def1a6d0c
|
[
"MIT"
] | 7
|
2021-07-06T13:22:31.000Z
|
2021-11-29T21:44:49.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
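# Typical invocation (assumes the Transifex client 'tx' is installed and a .tx
# configuration exists), run from the repository root:
#   python3 contrib/devtools/update-translations.py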
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'whatcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except IndexError:  # '%' was the final character, so there is no specifier to read
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
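# Illustrative example: find_format_specifiers("(percentage: %1%)") yields ['1', ')'];
# since a numeric (Qt) specifier is present, the spurious ')' is discarded below.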
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disabled by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", '&apos;')
text = text.replace('"', '&quot;')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8')  # need to override the encoding because 'utf8' is not understood, only 'utf-8' is
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
| 38.436019
| 124
| 0.632306
|
2c3d86c73a1148cac5cfa355cac9beed927688bf
| 723
|
py
|
Python
|
AsciiBot_Example/dots/getchar.py
|
naturecodevoid/RLBotAsciiDotsExample
|
b73f9b55bd907b66dc0ef21ce33ee421f19d95d6
|
[
"MIT"
] | 3
|
2019-07-03T15:11:17.000Z
|
2019-07-26T03:18:47.000Z
|
AsciiBot_Example/dots/getchar.py
|
naturecodevoid/RLBotAsciiDotsExample
|
b73f9b55bd907b66dc0ef21ce33ee421f19d95d6
|
[
"MIT"
] | 1
|
2019-08-04T20:17:53.000Z
|
2019-08-04T20:17:53.000Z
|
AsciiBot_Example/dots/getchar.py
|
naturecodevoid/RLBotAsciiDotsExample
|
b73f9b55bd907b66dc0ef21ce33ee421f19d95d6
|
[
"MIT"
] | 2
|
2019-08-04T20:09:11.000Z
|
2019-08-04T20:13:38.000Z
|
def _find_getch():
try:
import termios
except ImportError:
# Non-POSIX. Return msvcrt's (Windows') getch.
import msvcrt
return msvcrt.getch
# POSIX system. Create and return a getch that manipulates the tty.
import sys, tty
def _getch():
fd = sys.stdin.fileno()
old_settings = None
try:
old_settings = termios.tcgetattr(fd)
tty.setraw(fd)
except termios.error:
pass
try:
ch = sys.stdin.read(1)
finally:
if old_settings is not None:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
return _getch
getch = _find_getch()
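# Example usage: blocks until a single key is pressed and returns it
# (a str on POSIX, bytes when falling back to msvcrt on Windows):
#   key = getch()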
| 25.821429
| 71
| 0.568465
|
cbb933282a395d82060f6fb9e981c05d8d376f10
| 19,426
|
py
|
Python
|
io_import_vmf/import_qc.py
|
lasa01/io_import_vmf
|
3341b8e2d0be77cba8f3ec30812f6c859f9b9a83
|
[
"MIT"
] | 153
|
2020-04-19T20:57:00.000Z
|
2022-03-30T18:36:27.000Z
|
io_import_vmf/import_qc.py
|
lasa01/io_import_vmf
|
3341b8e2d0be77cba8f3ec30812f6c859f9b9a83
|
[
"MIT"
] | 112
|
2020-04-20T08:20:30.000Z
|
2022-03-14T00:59:33.000Z
|
io_import_vmf/import_qc.py
|
lasa01/io_import_vmf
|
3341b8e2d0be77cba8f3ec30812f6c859f9b9a83
|
[
"MIT"
] | 23
|
2020-04-28T18:41:00.000Z
|
2022-03-28T12:58:09.000Z
|
from io_scene_valvesource import import_smd, utils
from .utils import find_armature_modifier, truncate_name
from vmfpy.fs import VMFFileSystem, vmf_path
from vmfpy.vmt import VMT
import re
from collections import defaultdict
from typing import DefaultDict, Dict, Any, NamedTuple, Tuple, Set, Optional, Callable, TYPE_CHECKING
import bpy
import os
from os.path import splitext, basename, dirname, isfile, isdir, isabs, join, relpath
import subprocess
import sys
import platform
from contextlib import redirect_stdout
from io import StringIO
import time
import traceback
_CDMATERIALS_REGEX = re.compile(r'\$CDMaterials[ \t]+"([^"\n]+)"', re.IGNORECASE)
_IS_WINDOWS = platform.system() == "Windows"
_CROWBARCMD_PATH = join(dirname(__file__), "bin", "CrowbarCommandLineDecomp.exe" if _IS_WINDOWS else "crowbar.dll")
if TYPE_CHECKING:
from . import import_vmt # noqa: F401
def crowbar_command(full_mdl_path: str, out_path: str) -> Tuple[str, ...]:
if _IS_WINDOWS:
return (
_CROWBARCMD_PATH,
"-p", full_mdl_path,
"-o", out_path,
)
else:
return (
"dotnet", _CROWBARCMD_PATH,
full_mdl_path,
out_path,
)
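# Sketch of the resulting command (paths shown are illustrative):
#   Windows:   (".../bin/CrowbarCommandLineDecomp.exe", "-p", <mdl path>, "-o", <output dir>)
#   elsewhere: ("dotnet", ".../bin/crowbar.dll", <mdl path>, <output dir>)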
class FakeSmd():
def __init__(self, armature: bpy.types.Object, bone_id_map: Dict[int, str]):
self.a = armature
self.boneIDs = bone_id_map
def copy(self) -> 'FakeSmd':
return FakeSmd(self.a, self.boneIDs)
@staticmethod
def from_bst(smd: Any) -> 'FakeSmd':
if smd is None:
raise Exception("nothing was imported by Blender Source Tools")
if not isinstance(smd.a, bpy.types.Object) or not isinstance(smd.boneIDs, dict):
raise Exception("unexpected Blender Source Tools data format (unsupported version?)")
return FakeSmd(smd.a, smd.boneIDs)
class SmdImporterWrapper(import_smd.SmdImporter):
bl_idname = "import_scene._io_import_vmf_smd_wrapper"
filepath: bpy.props.StringProperty() # type: ignore
append: bpy.props.StringProperty(default='APPEND') # type: ignore
boneMode: bpy.props.StringProperty(default='NONE') # type: ignore
createCollections: bpy.props.BoolProperty(default=False) # type: ignore
skip_collision: bpy.props.BoolProperty(default=True) # type: ignore
skip_lod: bpy.props.BoolProperty(default=True) # type: ignore
skip_anim: bpy.props.BoolProperty(default=False) # type: ignore
vmt_importer: Optional[Any]
vmf_fs: VMFFileSystem
collection: bpy.types.Collection
root: str
name: str
full_name: str
def execute(self, context: bpy.types.Context) -> set:
self.existingBones = [] # type: ignore
self.num_files_imported = 0
self._missing_materials: Set[str] = set()
self._cdmaterials = [vmf_path("")]
SmdImporterWrapper.smd = None
# figure what the material dir should be for the qc
with open(self.filepath, 'r') as fp:
content = fp.read()
for match in _CDMATERIALS_REGEX.finditer(content):
self._cdmaterials.append(vmf_path(match.group(1)))
animations = False if self.skip_anim else "$staticprop" not in content.lower()
self.readQC(self.filepath, False, animations, False, 'QUATERNION', outer_qc=True)
return {'FINISHED'}
def readQC(self, filepath: str, newscene: bool, doAnim: bool,
makeCamera: bool, rotMode: str, outer_qc: bool = False) -> int:
if outer_qc:
self.qc = utils.QcInfo()
self.qc.startTime = time.time()
self.qc.jobName = SmdImporterWrapper.name
self.qc.root_filedir = dirname(filepath)
self.qc.makeCamera = makeCamera
self.qc.animation_names = []
return super().readQC(filepath, newscene, doAnim, makeCamera, rotMode, False)
def createArmature(self, armature_name: str) -> bpy.types.Object:
if armature_name.endswith("_skeleton"):
armature_name = armature_name[:-9]
return super().createArmature(armature_name)
def initSMD(self, filepath: str, smd_type: str, upAxis: str, rotMode: str, target_layer: int) -> Any:
smd = super().initSMD(filepath, smd_type, upAxis, rotMode, target_layer)
smd.jobName = truncate_name(splitext(relpath(filepath, SmdImporterWrapper.root))[0])
return smd
def readSMD(self, filepath: str, upAxis: str, rotMode: str,
newscene: bool = False, smd_type: Any = None, target_layer: int = 0) -> int:
if self.skip_collision and smd_type == utils.PHYS: # skip collision meshes
return 0
filepath_without_ext = splitext(filepath)[0].replace("\\", "/")
if self.skip_lod and (filepath_without_ext.rstrip("123456789").endswith("_lod")
and not filepath_without_ext.endswith(SmdImporterWrapper.full_name)): # skip lod meshes
return 0
result = super().readSMD(filepath, upAxis, rotMode, newscene, smd_type, target_layer)
if self.smd.g and self.smd.g != self.collection:
smd_collection: bpy.types.Collection = self.smd.g
while smd_collection.objects:
if smd_collection.objects[0].name not in self.collection.objects:
self.collection.objects.link(smd_collection.objects[0])
smd_collection.objects.unlink(smd_collection.objects[0])
bpy.data.collections.remove(smd_collection)
if result != 0:
SmdImporterWrapper.smd = self.smd
return result
# properly import materials if they exist
def getMeshMaterial(self, mat_name: str) -> Tuple[bpy.types.Material, int]:
mat_name = mat_name.lower().lstrip()
if self.vmt_importer is None or not mat_name or mat_name == "phy":
return super().getMeshMaterial(mat_name)
smd = self.smd
md: bpy.types.Mesh = smd.m.data
# search for material file
mat_name_path = vmf_path(mat_name + ".vmt")
for mat_dir in self._cdmaterials:
mat_path = "materials" / mat_dir / mat_name_path
if mat_path in self.vmf_fs:
mat_name = splitext(mat_path)[0]
break
else:
if mat_name not in self._missing_materials:
sys.__stdout__.write(f"WARNING: MISSING MATERIAL: {mat_name}\n")
self._missing_materials.add(mat_name)
return super().getMeshMaterial(mat_name)
staged = self.vmt_importer.stage(
mat_name,
lambda: VMT(
self.vmf_fs.open_file_utf8(mat_path),
self.vmf_fs,
allow_patch=True,
)
)
material = staged.get_material()
mat_ind = md.materials.find(material.name)
if mat_ind == -1:
mat_ind = len(md.materials)
md.materials.append(material)
return material, mat_ind
class NewQCInfo(NamedTuple):
path: str
root: str
class StagedQC():
def __init__(self, importer: 'QCImporter', name: str, context: bpy.types.Context,
info: Optional[NewQCInfo] = None, reused: Optional[bpy.types.Armature] = None) -> None:
self.name = name
self.context = context
self.info = info
self.reused = reused
self._qc_importer = importer
@staticmethod
def from_existing(importer: 'QCImporter', armature: bpy.types.Armature, context: bpy.types.Context) -> 'StagedQC':
return StagedQC(importer, armature.qc_data.full_name, context, reused=armature)
class QCImporter():
def __init__(self, dec_models_path: str, vmf_fs: VMFFileSystem = VMFFileSystem(),
vmt_importer: Optional['import_vmt.VMTImporter'] = None,
skip_collision: bool = True, skip_lod: bool = True, skip_anim: bool = False,
reuse_old: bool = True, verbose: bool = False):
self._cache: Dict[str, FakeSmd] = {}
self._cache_uniqueness: DefaultDict[str, bool] = defaultdict(lambda: True)
self.verbose = verbose
self.dec_models_path = dec_models_path
self.vmf_fs = vmf_fs
self.reuse_old = reuse_old
self.skip_collision = skip_collision
self.skip_lod = skip_lod
self.skip_anim = skip_anim
self.progress_callback: Callable[[int, int], None] = lambda current, total: None
self._staging: Dict[str, StagedQC] = {}
self._loaded: Dict[str, StagedQC] = {}
self.reusable_amount = 0
self.importable_amount = 0
SmdImporterWrapper.vmt_importer = vmt_importer
SmdImporterWrapper.vmf_fs = vmf_fs
def __enter__(self) -> 'QCImporter':
bpy.utils.unregister_class(import_smd.SmdImporter)
bpy.utils.register_class(SmdImporterWrapper)
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
bpy.utils.unregister_class(SmdImporterWrapper)
bpy.utils.register_class(import_smd.SmdImporter)
def stage(self, name: str, path: str, context: bpy.types.Context, root: str = "") -> StagedQC:
name = name.lower()
truncated_name = truncate_name(name)
if name in self._staging:
return self._staging[name]
if name in self._loaded:
return self._loaded[name]
if self.verbose:
print(f"[VERBOSE] Staging model {name}")
if self.reuse_old and truncated_name in bpy.data.armatures:
meshes = bpy.data.armatures[truncated_name].qc_data.read_meshes()
# mesh needs to be reimported if some materials failed for now
# make sure no meshes of the prop have been manually deleted
if all(mesh is not None for mesh in meshes) and (
SmdImporterWrapper.vmt_importer is None or
all(
material.use_nodes and len(material.node_tree.nodes) != 0
for mesh in meshes for material in mesh.data.materials
)):
self._staging[name] = StagedQC.from_existing(self, bpy.data.armatures[truncated_name], context)
self.reusable_amount += 1
return self._staging[name]
else:
# make sure the mesh isn't reimported every time if the materials failed in the first import
bpy.data.armatures[truncated_name].name = truncated_name + ".001"
self._staging[name] = StagedQC(self, name, context, info=NewQCInfo(path, root))
self.importable_amount += 1
return self._staging[name]
def load_all(self) -> None:
if self.verbose:
print("[VERBOSE] Loading all models...")
total = len(self._staging)
current = 0
for name in self._staging:
staged = self._staging[name]
try:
self._load(name, staged)
except Exception as err:
print(f"[ERROR]: MODEL {name} LOADING FAILED: {err}")
if self.verbose:
traceback.print_exception(type(err), err, err.__traceback__)
else:
self._loaded[name] = staged
current += 1
if current % 5 == 0 or current == total:
self.progress_callback(current, total)
self._staging.clear()
self.reusable_amount = 0
self.importable_amount = 0
def _load(self, name: str, staged: StagedQC) -> None:
name = name.lower()
truncated_name = truncate_name(name)
if staged.reused is not None:
scene_collection = staged.context.scene.collection
# qc is already imported
if self.verbose:
print(f"[VERBOSE] Model {name} previously imported, recreating...")
armature = staged.reused
qc_data = armature.qc_data
armature_obj: bpy.types.Object = bpy.data.objects.new(armature.name, armature)
scene_collection.objects.link(armature_obj)
for mesh_obj in qc_data.read_meshes():
new_obj = mesh_obj.copy()
new_obj.name = new_obj.data.name
new_obj.parent = armature_obj
new_obj.scale = (1, 1, 1)
new_obj.location = (0, 0, 0)
new_obj.rotation_euler = (0, 0, 0)
armature_modifier = find_armature_modifier(new_obj)
if armature_modifier is None:
armature_modifier = new_obj.modifiers.new("Armature", 'ARMATURE')
armature_modifier.object = armature_obj
scene_collection.objects.link(new_obj)
if qc_data.action is not None:
anim_data = armature_obj.animation_data_create()
anim_data.action = qc_data.action
staged.context.view_layer.update()
self._cache[name] = FakeSmd(armature_obj, qc_data.read_bone_id_map())
return
if staged.info is None:
raise Exception("required information was not specified for non-reused staged qc")
path, root = staged.info
if self.verbose:
print(f"[VERBOSE] Importing model {name}...")
SmdImporterWrapper.collection = staged.context.scene.collection
SmdImporterWrapper.name = truncated_name
SmdImporterWrapper.full_name = name
if path.endswith(".mdl"):
qc_path = join(self.dec_models_path, name + ".qc")
if not isfile(qc_path):
# decompiled model doesn't exist, decompile it
mdl_path = vmf_path(name + ".mdl")
mdl_dir = mdl_path.parent
if not isabs(path):
mdl_name = mdl_path.stem
# save required files
saved_files = 0
if mdl_dir not in self.vmf_fs.tree:
raise FileNotFoundError(mdl_dir)
for filename in self.vmf_fs.tree[mdl_dir].files:
if not filename.startswith(mdl_name):
continue
file_out_path = join(self.dec_models_path, mdl_dir, filename)
os.makedirs(dirname(file_out_path), exist_ok=True)
with self.vmf_fs[mdl_dir / filename] as in_f:
with open(file_out_path, 'wb') as out_f:
for line in in_f:
out_f.write(line)
saved_files += 1
if saved_files == 0:
print(f"[ERROR] MODEL {mdl_path} NOT FOUND")
raise FileNotFoundError(mdl_path)
full_mdl_path = str(self.dec_models_path / mdl_path)
else:
full_mdl_path = path
# call the decompiler
result = subprocess.run(
crowbar_command(full_mdl_path, str(self.dec_models_path / mdl_dir)),
text=True, errors='replace', stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
alternate_qc_dir = splitext(qc_path)[0]
alternate_qc_path = join(alternate_qc_dir, basename(name) + ".qc")
if isdir(alternate_qc_dir) and isfile(alternate_qc_path):
# model could be decompiled into different location if user has edited settings in Crowbar
qc_dir = dirname(qc_path)
for filename in os.listdir(alternate_qc_dir):
filepath = join(alternate_qc_dir, filename)
new_filepath = join(qc_dir, filename)
os.replace(filepath, new_filepath)
os.rmdir(alternate_qc_dir)
if result.returncode != 0 or not isfile(qc_path):
print(result.stdout)
raise Exception(f"Decompiling model {mdl_path} failed")
path = qc_path
SmdImporterWrapper.root = self.dec_models_path
else:
SmdImporterWrapper.root = root
log_capture = StringIO()
try:
with redirect_stdout(log_capture):
bpy.ops.import_scene._io_import_vmf_smd_wrapper(
filepath=path,
skip_collision=self.skip_collision,
skip_lod=self.skip_lod,
skip_anim=self.skip_anim,
)
except Exception:
print(log_capture.getvalue())
raise
try:
fake_smd = FakeSmd.from_bst(SmdImporterWrapper.smd)
except Exception as err:
raise Exception(f"Error importing {name}: {err}")
if name.startswith("models/props/autocombine/"):
# cannot cache autocombine props, they have conflicting smd files
os.remove(path)
self._cache[name] = fake_smd
if fake_smd.a.name in bpy.context.scene.collection.objects:
bpy.context.scene.collection.objects.unlink(fake_smd.a)
qc_data = fake_smd.a.data.qc_data
qc_data.save_meshes(fake_smd.a.children)
qc_data.save_bone_id_map(fake_smd.boneIDs)
if fake_smd.a.animation_data is not None:
qc_data.action = fake_smd.a.animation_data.action
self._cache[name] = fake_smd
def get_smd(self, name: str, collection: bpy.types.Collection, context: bpy.types.Context) -> FakeSmd:
name = name.lower()
if name not in self._cache:
raise Exception(f"model {name} hasn't been imported")
self._cache_uniqueness[name] = False
smd = self._cache[name]
scene_collection = context.scene.collection
if smd.a.name in scene_collection.objects:
scene_collection.objects.unlink(smd.a)
collection.objects.link(smd.a)
for child in smd.a.children:
if child.name in scene_collection.objects:
scene_collection.objects.unlink(child)
collection.objects.link(child)
return self._cache[name]
def get(self, name: str, collection: bpy.types.Collection, context: bpy.types.Context) -> bpy.types.Object:
return self.get_smd(name, collection, context).a
def get_unique_smd(self, name: str, collection: bpy.types.Collection, context: bpy.types.Context) -> FakeSmd:
name = name.lower()
if name not in self._cache:
raise Exception(f"model {name} hasn't been imported")
if self._cache_uniqueness[name]:
return self.get_smd(name, collection, context)
if self.verbose:
print(f"[VERBOSE] Copying model {name}...")
smd = self._cache[name].copy()
original_arm = smd.a
copy_arm = original_arm.copy()
collection.objects.link(copy_arm)
for child in original_arm.children:
twin = child.copy()
twin.parent = copy_arm
armature_modifier = find_armature_modifier(twin)
if armature_modifier is not None:
armature_modifier.object = copy_arm
collection.objects.link(twin)
smd.a = copy_arm
return smd
def get_unique(self, name: str, collection: bpy.types.Collection, context: bpy.types.Context) -> bpy.types.Object:
return self.get_unique_smd(name, collection, context).a
| 45.071926
| 118
| 0.61186
|
0d096586571f75aff232b1fb0e0b477f50081a66
| 1,649
|
py
|
Python
|
src/io_utils/path_configs/ascat/paths_hsaf_ascat_ssmcdr.py
|
wpreimes/io_utils
|
9ef4161a5bc65ab2fabee0e2c7cf873f19cf7a17
|
[
"MIT"
] | null | null | null |
src/io_utils/path_configs/ascat/paths_hsaf_ascat_ssmcdr.py
|
wpreimes/io_utils
|
9ef4161a5bc65ab2fabee0e2c7cf873f19cf7a17
|
[
"MIT"
] | 4
|
2021-07-09T09:03:12.000Z
|
2021-12-20T17:24:32.000Z
|
src/io_utils/path_configs/ascat/paths_hsaf_ascat_ssmcdr.py
|
wpreimes/io_utils
|
9ef4161a5bc65ab2fabee0e2c7cf873f19cf7a17
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from collections import OrderedDict
import os
import getpass
import io_utils.root_path as root_path
path_settings = \
{
('HSAF_ASCAT', 'SSM', 'H115+H116'):
# paths will be tried in this order, there is no limit to the potential paths here
OrderedDict([
('local',
{
'win': os.path.join(root_path.d,
'data-read',
'HSAF_ASCAT_SSMCDR',
'H115+H116r8'),
'lin': os.path.join(root_path.dr,
'USERS',
getpass.getuser(),
'HSAF_ASCAT_SSMCDR',
'H115+H116r8'),
}),
('radar',
{
'win': os.path.join(root_path.r,
'Projects',
'H_SAF_CDOP3',
'05_deliverables_products',
'H116',
'H115+H116r8'),
'lin': os.path.join(root_path.r,
'Projects',
'H_SAF_CDOP3',
'05_deliverables_products',
'H116',
'H115+H116r8'),
}),
]),
}
| 39.261905
| 91
| 0.306246
|
769113c44a336e1b05826450b2bbf8b425386016
| 943
|
py
|
Python
|
WEEKS/CD_Sata-Structures/_RESOURCES/Whiteboard-Pairing/ReverseSubList/model_solution/model_solution.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 13
|
2021-03-11T00:25:22.000Z
|
2022-03-19T00:19:23.000Z
|
WEEKS/CD_Sata-Structures/_RESOURCES/Whiteboard-Pairing/ReverseSubList/model_solution/model_solution.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 162
|
2021-03-09T01:52:11.000Z
|
2022-03-12T01:09:07.000Z
|
WEEKS/CD_Sata-Structures/_RESOURCES/Whiteboard-Pairing/ReverseSubList/model_solution/model_solution.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 12
|
2021-04-26T19:43:01.000Z
|
2022-01-31T08:36:29.000Z
|
def reverseLinkedList(node):
current = node
nextNode = None
prevNode = None
while current:
# store a reference to the next list node
# before we overwrite current.next
nextNode = current.next
# reverse the 'next' pointer
current.next = prevNode
# step forward to the next list node
prevNode = current
current = nextNode
return prevNode.value
class ListNode:
def __init__(self, value):
self.value = value
self.next = None
a = ListNode("a")
b = ListNode("b")
c = ListNode("c")
d = ListNode("d")
e = ListNode("e")
a.next = b
b.next = c
c.next = d
d.next = e
# Function that prints the contents of a linked list
def printList(node):
current = node
while current:
print(current.value)
current = current.next
print(reverseLinkedList(a))
# should print 'e'
printList(e)
# should print 'e', 'd', 'c', 'b', 'a'
| 18.490196
| 52
| 0.611877
|
541d619388abbdc2c29d87bf8de59743a507e46d
| 5,242
|
py
|
Python
|
ginga/gw/ColorBar.py
|
saimn/ginga
|
9daf1875b4c1b0fad0a053c5f258bf7d4c0f3455
|
[
"BSD-3-Clause"
] | null | null | null |
ginga/gw/ColorBar.py
|
saimn/ginga
|
9daf1875b4c1b0fad0a053c5f258bf7d4c0f3455
|
[
"BSD-3-Clause"
] | null | null | null |
ginga/gw/ColorBar.py
|
saimn/ginga
|
9daf1875b4c1b0fad0a053c5f258bf7d4c0f3455
|
[
"BSD-3-Clause"
] | null | null | null |
#
# ColorBar.py -- color bar widget
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.misc import Callback
from ginga import RGBMap
from ginga.gw import Viewers
from ginga.canvas.types import utils
class ColorBarError(Exception):
pass
class ColorBar(Callback.Callbacks):
def __init__(self, logger, rgbmap=None, link=False):
Callback.Callbacks.__init__(self)
self.logger = logger
self.link_rgbmap = link
if not rgbmap:
rgbmap = RGBMap.RGBMapper(logger)
self._start_x = 0
self._sarr = None
cbar = Viewers.CanvasView(logger=self.logger)
width, height = 1, 28
cbar.set_desired_size(width, height)
cbar.enable_autozoom('off')
cbar.enable_autocuts('off')
# In web backend, JPEG rendering makes for mushy text
## settings = cbar.get_settings()
## settings.set(html5_canvas_format='png')
# to respond quickly to contrast adjustment
#cbar.defer_lagtime = 0.005
cbar.set_bg(0.4, 0.4, 0.4)
# for debugging
cbar.set_name('colorbar')
self.cbar_view = cbar
# add callbacks for contrast adjustment, etc.
cbar.add_callback('configure', self.resize_cb)
cbar.add_callback('cursor-down', self.cursor_press_cb)
cbar.add_callback('cursor-move', self.cursor_drag_cb)
cbar.add_callback('cursor-up', self.cursor_release_cb)
cbar.add_callback('draw-up', self.draw_release_cb)
cbar.add_callback('none-move', self.none_move_cb)
cbar.add_callback('zoom-scroll', self.scroll_cb)
#cbar.configure(width, height)
iw = Viewers.GingaViewerWidget(viewer=cbar)
self.widget = iw
iw.resize(width, height)
canvas = self.cbar_view.get_canvas()
self.cbar = utils.ColorBar(offset=0, height=height, rgbmap=rgbmap,
fontsize=8)
canvas.add(self.cbar, tag='colorbar')
self.set_rgbmap(rgbmap)
# For callbacks
for name in ('motion', 'scroll'):
self.enable_callback(name)
def get_widget(self):
return self.widget
def get_rgbmap(self):
return self.rgbmap
def set_rgbmap(self, rgbmap):
self.rgbmap = rgbmap
self.cbar.rgbmap = rgbmap
# TODO: figure out if we can get rid of this link option
if self.link_rgbmap:
rgbmap.add_callback('changed', self.rgbmap_cb)
self.redraw()
def set_cmap(self, cm):
self.rgbmap.set_cmap(cm)
self.redraw()
def set_imap(self, im, reset=False):
self.rgbmap.set_imap(im)
self.redraw()
def set_range(self, loval, hival):
self.cbar_view.cut_levels(loval, hival)
self.redraw()
def resize_cb(self, viewer, width, height):
self.logger.info("colorbar resized to %dx%d" % (width, height))
self.cbar.height = height
self.cbar_view.redraw(whence=0)
def redraw(self):
self.cbar_view.redraw()
def shift_colormap(self, pct):
if self._sarr is None:
return
self.rgbmap.set_sarr(self._sarr, callback=False)
self.rgbmap.shift(pct)
self.redraw()
def stretch_colormap(self, pct):
self.rgbmap.stretch(pct)
self.redraw()
def rgbmap_cb(self, rgbmap):
self.redraw()
def cursor_press_cb(self, canvas, event, data_x, data_y):
x, y = event.viewer.get_last_win_xy()
self._start_x = x
sarr = self.rgbmap.get_sarr()
self._sarr = sarr.copy()
return True
def cursor_release_cb(self, canvas, event, data_x, data_y):
x, y = event.viewer.get_last_win_xy()
dx = x - self._start_x
wd, ht = event.viewer.get_window_size()
pct = float(dx) / float(wd)
#print "dx=%f wd=%d pct=%f" % (dx, wd, pct)
self.shift_colormap(pct)
return True
def draw_release_cb(self, canvas, event, data_x, data_y):
self.rgbmap.reset_cmap()
return True
def cursor_drag_cb(self, canvas, event, data_x, data_y):
x, y = event.viewer.get_last_win_xy()
wd, ht = event.viewer.get_window_size()
dx = x - self._start_x
pct = float(dx) / float(wd)
#print "dx=%f wd=%d pct=%f" % (dx, wd, pct)
self.shift_colormap(pct)
return True
def none_move_cb(self, canvas, event, data_x, data_y):
x, y = event.viewer.get_last_win_xy()
wd, ht = event.viewer.get_window_size()
dist = self.rgbmap.get_dist()
pct = float(x) / float(wd)
rng_pct = dist.get_dist_pct(pct)
loval, hival = event.viewer.get_cut_levels()
value = float(loval + (rng_pct * (hival - loval)))
self.make_callback('motion', value, event)
return True
def scroll_cb(self, viewer, event):
direction = event.direction
if (direction < 90.0) or (direction > 270.0):
# up
scale_factor = 1.1
else:
# not up!
scale_factor = 0.9
self.stretch_colormap(scale_factor)
self.make_callback('scroll', event)
#END
| 30.126437
| 74
| 0.611599
|
1ef75af2b56f3747ecc191d58535312a305b5629
| 290
|
py
|
Python
|
visualize.py
|
Phillyclause89/Minecraft-Terrain-AI
|
1afb2e16164e022c92074511115eb1a2f2167930
|
[
"MIT"
] | null | null | null |
visualize.py
|
Phillyclause89/Minecraft-Terrain-AI
|
1afb2e16164e022c92074511115eb1a2f2167930
|
[
"MIT"
] | null | null | null |
visualize.py
|
Phillyclause89/Minecraft-Terrain-AI
|
1afb2e16164e022c92074511115eb1a2f2167930
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import pickle
pickle_in = open("training_data.pickle", "rb")
training_data = pickle.load(pickle_in)
for data in training_data:
img = data[0]
choice = data[1]  # recorded action/label paired with this frame (not used in this loop)
cv2.imshow("test", img)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
| 20.714286
| 46
| 0.710345
|
c02b63a4ad0a03043c4e6ed007fcf31f5767deeb
| 2,430
|
py
|
Python
|
app/main/forms.py
|
Unicomcat/flask_test
|
c5951842092157966c536dcafff537c9509c9881
|
[
"MIT"
] | null | null | null |
app/main/forms.py
|
Unicomcat/flask_test
|
c5951842092157966c536dcafff537c9509c9881
|
[
"MIT"
] | null | null | null |
app/main/forms.py
|
Unicomcat/flask_test
|
c5951842092157966c536dcafff537c9509c9881
|
[
"MIT"
] | null | null | null |
from flask.ext.wtf import Form
from wtforms import StringField, TextAreaField, BooleanField, SelectField,\
SubmitField
from wtforms.validators import Required, Length, Email, Regexp
from wtforms import ValidationError
from ..models import Role, User
from flask.ext.pagedown.fields import PageDownField
class NameForm(Form):
name = StringField('What is your name?', validators=[Required()])
submit = SubmitField('Submit')
class EditProfileForm(Form):
name = StringField('Real name', validators=[Length(0, 64)])
location = StringField('Location', validators=[Length(0, 64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
class EditProfileAdminForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
username = StringField('Username', validators=[
Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, '
'numbers, dots or underscores')])
confirmed = BooleanField('Confirmed')
role = SelectField('Role', coerce=int)
name = StringField('Real name', validators=[Length(0, 64)])
location = StringField('Location', validators=[Length(0, 64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
self.role.choices = [(role.id, role.name)
for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
if field.data != self.user.email and \
User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if field.data != self.user.username and \
User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class PostForm(Form):
#body = TextAreaField("What's on your mind?", validators=[Required()])
body = PageDownField("what's on your mind?", validators=[Required()])
submit = SubmitField('Submit')
class CommentForm(Form):
body = StringField('',validators=[Required()])
submit = SubmitField('Submit')
| 39.836066
| 78
| 0.636626
|
3e85fb3d89316cd6d540a775d56831e36eb9292a
| 1,249
|
py
|
Python
|
{{cookiecutter.prefix}}_{{cookiecutter.project_slug|lower}}/{{cookiecutter.project_slug|lower}}/extern/qtawesome/animation.py
|
melMass/pyvfx-boilerplate
|
a4df88a6e272514205a5bf34a88f4a27daa66f9c
|
[
"MIT"
] | null | null | null |
{{cookiecutter.prefix}}_{{cookiecutter.project_slug|lower}}/{{cookiecutter.project_slug|lower}}/extern/qtawesome/animation.py
|
melMass/pyvfx-boilerplate
|
a4df88a6e272514205a5bf34a88f4a27daa66f9c
|
[
"MIT"
] | null | null | null |
{{cookiecutter.prefix}}_{{cookiecutter.project_slug|lower}}/{{cookiecutter.project_slug|lower}}/extern/qtawesome/animation.py
|
melMass/pyvfx-boilerplate
|
a4df88a6e272514205a5bf34a88f4a27daa66f9c
|
[
"MIT"
] | null | null | null |
from Qt.QtCore import QTimer
class Spin:
def __init__(self, parent_widget, interval=10, step=1):
self.parent_widget = parent_widget
self.interval, self.step = interval, step
self.info = {}
def _update(self):
if self.parent_widget in self.info:
timer, angle, step = self.info[self.parent_widget]
if angle >= 360:
angle = 0
angle += step
self.info[self.parent_widget] = timer, angle, step
self.parent_widget.update()
def setup(self, icon_painter, painter, rect):
if self.parent_widget not in self.info:
timer = QTimer()
timer.timeout.connect(self._update)
self.info[self.parent_widget] = [timer, 0, self.step]
timer.start(self.interval)
else:
timer, angle, self.step = self.info[self.parent_widget]
x_center = rect.width() * 0.5
y_center = rect.height() * 0.5
painter.translate(x_center, y_center)
painter.rotate(angle)
painter.translate(-x_center, -y_center)
class Pulse(Spin):
def __init__(self, parent_widget):
Spin.__init__(self, parent_widget, interval=300, step=45)
| 29.738095
| 67
| 0.592474
|
e2014d5359501b972b70c4ffcf2d05d8c9ea9fb3
| 3,228
|
py
|
Python
|
malepierre/characters/models.py
|
EliotBerriot/malepierre
|
34a11beae528242c062fcc308c2d98c28fa61fd1
|
[
"BSD-3-Clause"
] | null | null | null |
malepierre/characters/models.py
|
EliotBerriot/malepierre
|
34a11beae528242c062fcc308c2d98c28fa61fd1
|
[
"BSD-3-Clause"
] | null | null | null |
malepierre/characters/models.py
|
EliotBerriot/malepierre
|
34a11beae528242c062fcc308c2d98c28fa61fd1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import models
from django.utils import timezone
from django.core.urlresolvers import reverse
class CodeMixin(models.Model):
code = models.CharField(max_length=255, unique=True)
class Meta:
abstract = True
class NameMixin(models.Model):
name = models.CharField(max_length=255, unique=True)
class Meta:
abstract = True
ordering = ('name',)
def __str__(self):
return self.name
class AbstractSet(models.Model):
max_choices = models.IntegerField(default=1)
class Meta:
abstract = True
def __str__(self):
return '/'.join([choice.name for choice in self.choices.all()])
class DescriptionMixin(models.Model):
description = models.TextField(null=True, blank=True)
class Meta:
abstract = True
class Character(NameMixin):
background = models.TextField(null=True, blank=True)
creation_date = models.DateTimeField(default=timezone.now)
class Talent(CodeMixin, NameMixin, DescriptionMixin):
linked_talent = models.ForeignKey('self', null=True, blank=True)
def get_absolute_url(self):
return reverse('talents:index') + '#{0}'.format(self.code)
class TalentSet(AbstractSet):
choices = models.ManyToManyField(Talent, related_name='talentsets')
class Skill(CodeMixin, NameMixin, DescriptionMixin):
ATTRIBUTE_CHOICES = (
('strength', 'Strength'),
('constitution', 'Constitution'),
('agility', 'Agility'),
('intelligence', 'Intelligence'),
('mental_strength', 'Mental strength'),
('sociability', 'Sociability'),
)
TYPE_CHOICES = (
('base', 'Base'),
('advanced', 'Advanced'),
)
attribute = models.CharField(choices=ATTRIBUTE_CHOICES, max_length=30, blank=True, null=True)
type = models.CharField(choices=TYPE_CHOICES, max_length=30, default='base')
linked_talents = models.ManyToManyField(Talent, blank=True, related_name='linked_skills')
linked_skill = models.ForeignKey('self', null=True, blank=True)
def get_absolute_url(self):
return reverse('skills:index') + '#{0}'.format(self.code)
class SkillSet(AbstractSet):
choices = models.ManyToManyField(Skill, related_name='skillsets')
class Career(CodeMixin, NameMixin, DescriptionMixin):
exits = models.ManyToManyField('self', blank=True, symmetrical=False, related_name='access')
talents = models.ManyToManyField(TalentSet, blank=True)
skills = models.ManyToManyField(SkillSet, blank=True)
# main profile
cc = models.IntegerField(default=0)
ct = models.IntegerField(default=0)
strength = models.IntegerField(default=0)
constitution = models.IntegerField(default=0)
agility = models.IntegerField(default=0)
intelligence = models.IntegerField(default=0)
mental_strength = models.IntegerField(default=0)
sociability = models.IntegerField(default=0)
# secondary profile
attacks = models.IntegerField(default=0)
wounds = models.IntegerField(default=0)
movement = models.IntegerField(default=0)
magic = models.IntegerField(default=0)
def get_absolute_url(self):
return reverse('careers:index') + '#{0}'.format(self.code)
| 31.038462
| 97
| 0.695477
|
578fb9b6b50ad50e03cad4b76a93a76bb7f64c0c
| 2,380
|
py
|
Python
|
MultiRoom.py
|
dannywen2/bilibili-live-tools
|
99f34daec567c3ec1d315ebbfd36350ea8a0513e
|
[
"MIT"
] | 1
|
2021-10-02T09:30:49.000Z
|
2021-10-02T09:30:49.000Z
|
MultiRoom.py
|
dannywen2/bilibili-live-tools
|
99f34daec567c3ec1d315ebbfd36350ea8a0513e
|
[
"MIT"
] | null | null | null |
MultiRoom.py
|
dannywen2/bilibili-live-tools
|
99f34daec567c3ec1d315ebbfd36350ea8a0513e
|
[
"MIT"
] | null | null | null |
import random
import asyncio
from bilibili import bilibili
from printer import Printer
async def get_area_list():
response = await bilibili().req_area_list()
json_response = await response.json(content_type=None)
return [ area_info['id'] for area_info in json_response['data'] ]
async def area2room(area_id):
while True:
try:
url = "https://api.live.bilibili.com/room/v1/area/getRoomList?platform=web&parent_area_id=" + \
str(area_id) + "&cate_id=0&area_id=0&sort_type=online&page=1&page_size=30"
response = await bilibili().bili_section_get(url)
json_response = await response.json(content_type=None)
checklen = len(json_response['data'])
rand_num = random.randint(0, checklen-1)
new_area_id = json_response['data'][rand_num]['parent_id']
if not new_area_id == int(area_id):
continue
area_room = json_response['data'][rand_num]['roomid']
state = await bilibili().check_room_state(area_room)
if state == 1:
new_area = str(new_area_id) + json_response['data'][rand_num]['parent_name']
return [area_room, new_area]
else:
Printer().printer("检测到获取房间未开播,立即尝试重新获取", "Error", "red")
except Exception as e:
Printer().printer(f"获取房间列表失败,5s后进行下次尝试 {repr(e)}", "Error", "red")
await asyncio.sleep(5)
async def check_state(area, roomid=None):
if roomid is not None:
response = await bilibili().check_room_info(roomid)
json_response = await response.json(content_type=None)
live_status = json_response['data']['live_status']
curr_area_name = json_response['data']['parent_area_name']
if live_status == 1 and curr_area_name in area:
Printer().printer(f'[Area {area}] room {roomid} is live, status normal', "Info", "green")
return [roomid, area]
elif live_status != 1:
Printer().printer(f"[{area}分区] 房间 {roomid} 已未直播!将切换监听房间", "Info", "green")
else:
# print(type(live_status), live_status, curr_area_name)
Printer().printer(f"[{area}分区] 房间 {roomid} 已切换分区[{curr_area_name}]!将切换监听房间", "Info", "green")
return await area2room(area[0])
async def get_all(area_list):
return [await area2room(i) for i in area_list]
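# Minimal driver sketch (assumed, not part of the original module); it relies on the
# bilibili/printer helpers imported above being configured:
#
#   async def main():
#       area_list = await get_area_list()
#       return await get_all(area_list)   # one live room per area, e.g. [[roomid, "1娱乐"], ...]
#
#   # asyncio.get_event_loop().run_until_complete(main())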
| 41.754386
| 107
| 0.627731
|
d9628590d40c80b269f815f75a9eda5f9c8783bf
| 1,767
|
py
|
Python
|
format_type.py
|
jmp75/CPPDebuggerVisualizers
|
46d64f255e98e2d6fe6d3562855b93bb55c93fb4
|
[
"BSL-1.0"
] | 141
|
2015-04-29T10:17:06.000Z
|
2022-01-24T12:45:34.000Z
|
format_type.py
|
x-santiaga-x/CPPDebuggerVisualizers
|
b6852c74bd7eff1347a9dcb748633e957cad2e54
|
[
"BSL-1.0"
] | 28
|
2016-09-23T14:03:46.000Z
|
2022-01-10T14:26:36.000Z
|
format_type.py
|
x-santiaga-x/CPPDebuggerVisualizers
|
b6852c74bd7eff1347a9dcb748633e957cad2e54
|
[
"BSL-1.0"
] | 40
|
2015-11-23T01:45:35.000Z
|
2021-12-16T14:33:56.000Z
|
'''Tools that format C++ type for reading'''
cpp_type = input("Please enter type: ")
if not cpp_type:
cpp_type = "boost::gil::variant<boost::mpl::vector<boost::gil::image<boost::gil::pixel<unsigned char,boost::gil::layout<boost::mpl::vector1<boost::gil::gray_color_t>,boost::mpl::range_c<int,0,1> > >,0,std::allocator<unsigned char> >,boost::gil::image<boost::gil::pixel<unsigned char,boost::gil::layout<boost::mpl::vector3<boost::gil::red_t,boost::gil::green_t,boost::gil::blue_t>,boost::mpl::range_c<int,0,3> > >,0,std::allocator<unsigned char> >,boost::gil::image<boost::gil::pixel<unsigned char,boost::gil::layout<boost::mpl::vector4<boost::gil::red_t,boost::gil::green_t,boost::gil::blue_t,boost::gil::alpha_t>,boost::mpl::range_c<int,0,4> > >,0,std::allocator<unsigned char> >,boost::gil::image<boost::gil::pixel<unsigned short,boost::gil::layout<boost::mpl::vector1<boost::gil::gray_color_t>,boost::mpl::range_c<int,0,1> > >,0,std::allocator<unsigned char> >,boost::gil::image<boost::gil::pixel<unsigned short,boost::gil::layout<boost::mpl::vector3<boost::gil::red_t,boost::gil::green_t,boost::gil::blue_t>,boost::mpl::range_c<int,0,3> > >,0,std::allocator<unsigned char> >,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na,boost::mpl::na> >"
print()
indent = 0
for c in cpp_type:
if c == "<":
print(c)
indent+=1
print(indent*" ", end="")
elif c == ">":
indent-=1
print()
print(indent*" " + c, end="")
elif c == ",":
print(c, end="")
print()
print(indent*" ", end="")
else:
print(c, end="")
| 80.318182
| 1,311
| 0.642332
|
c11737200d4e22668c9c2b011f5ac152a1da9583
| 1,272
|
py
|
Python
|
ProxyCrawl/ProxyCrawl/migrate.py
|
Time1ess/ProxyPool
|
c44e74e8045fc560e5fe905aa41135ecb3e6da98
|
[
"MIT"
] | 18
|
2017-04-25T09:39:08.000Z
|
2022-03-09T08:07:28.000Z
|
ProxyCrawl/ProxyCrawl/migrate.py
|
ghosttyq/ProxyPool
|
c44e74e8045fc560e5fe905aa41135ecb3e6da98
|
[
"MIT"
] | null | null | null |
ProxyCrawl/ProxyCrawl/migrate.py
|
ghosttyq/ProxyPool
|
c44e74e8045fc560e5fe905aa41135ecb3e6da98
|
[
"MIT"
] | 10
|
2017-05-29T00:53:41.000Z
|
2021-05-08T09:07:52.000Z
|
#!/usr/local/bin/python3
# coding: UTF-8
# Author: David
# Email: youchen.du@gmail.com
# Created: 2017-04-27 09:12
# Last modified: 2017-04-27 09:36
# Filename: migrate.py
# Description:
labels = ('name', 'url_fmt', 'row_xpath', 'host_xpath', 'port_xpath',
'addr_xpath', 'mode_xpath', 'proto_xpath', 'vt_xpath')
data = [
('DoubleSixip', 'http://www.66ip.cn/{}.html', '//div[@id="main"]//tr',
'td[1]/text()', 'td[2]/text()', 'td[3]/text()', 'td[4]/text()', 'null',
'td[5]/text()'),
('kuaidaili', 'http://www.kuaidaili.com/free/intr/{}',
'//div[@id="list"]/table//tr', 'td[1]/text()', 'td[2]/text()',
'td[5]//text()', 'td[3]/text()', 'td[4]/text()', 'td[7]/text()'),
('kuaidaili2', 'http://www.kuaidaili.com/free/inha/{}',
'//div[@id="list"]/table//tr', 'td[1]/text()', 'td[2]/text()',
'td[5]//text()', 'td[3]/text()', 'td[4]/text()', 'td[7]/text()'),
('xici', 'http://www.xicidaili.com/nt/{}', '//table[@id="ip_list"]//tr',
'td[2]/text()', 'td[3]/text()', 'td[4]//text()', 'td[5]/text()',
'td[6]/text()', 'td[10]/text()')]
with open('rules.csv', 'wb') as f:
f.write('{}\n'.format(' '.join(labels)).encode('utf-8'))
for item in data:
f.write('{}\n'.format(' '.join(item)).encode('utf-8'))
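# Read-back sketch (editorial; assumes the rule fields contain no spaces, which holds for
# the rows written above):
#
#   import csv
#   with open('rules.csv', newline='') as f:
#       rules = list(csv.DictReader(f, delimiter=' '))
#   print(rules[0]['name'], rules[0]['url_fmt'])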
| 41.032258
| 76
| 0.52673
|
dd01106f50de9d597c66e9d0ed3c5b74dd7852a6
| 24
|
py
|
Python
|
src/Protractor3D/__init__.py
|
SvenKratz/Protractor3D
|
39b6c877cc88cae028ca938e994034b83fcccb68
|
[
"MIT"
] | 4
|
2018-02-06T14:41:26.000Z
|
2020-03-19T14:16:05.000Z
|
src/Protractor3D/__init__.py
|
SvenKratz/Protractor3D
|
39b6c877cc88cae028ca938e994034b83fcccb68
|
[
"MIT"
] | null | null | null |
src/Protractor3D/__init__.py
|
SvenKratz/Protractor3D
|
39b6c877cc88cae028ca938e994034b83fcccb68
|
[
"MIT"
] | null | null | null |
__all__=["Protractor3D"]
| 24
| 24
| 0.791667
|
691e2bc03c8380b89a2a41d53775315d820a3654
| 2,520
|
py
|
Python
|
venv/lib/python3.5/site-packages/bears/matlab/MatlabIndentationBear.py
|
prashant0598/CoffeeApp
|
4fa006aebf06e12ed34766450ddcfa548ee63307
|
[
"MIT"
] | null | null | null |
venv/lib/python3.5/site-packages/bears/matlab/MatlabIndentationBear.py
|
prashant0598/CoffeeApp
|
4fa006aebf06e12ed34766450ddcfa548ee63307
|
[
"MIT"
] | null | null | null |
venv/lib/python3.5/site-packages/bears/matlab/MatlabIndentationBear.py
|
prashant0598/CoffeeApp
|
4fa006aebf06e12ed34766450ddcfa548ee63307
|
[
"MIT"
] | null | null | null |
import re
from coalib.bearlib import deprecate_settings
from coalib.bears.LocalBear import LocalBear
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
class MatlabIndentationBear(LocalBear):
LANGUAGES = {'Matlab', 'Octave'}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Formatting'}
@deprecate_settings(indent_size='tab_width')
def run(self, filename, file, indent_size: int=2):
"""
This bear features a simple algorithm to calculate the right
indentation for Matlab/Octave code. However, it will not handle hanging
indentation or conditions ranging over several lines yet.
:param indent_size: Number of spaces per indentation level.
"""
new_file = tuple(self.reindent(file, indent_size))
if new_file != tuple(file):
wholediff = Diff.from_string_arrays(file, new_file)
for diff in wholediff.split_diff():
yield Result(
self,
'The indentation could be changed to improve readability.',
severity=RESULT_SEVERITY.INFO,
affected_code=(diff.range(filename),),
diffs={filename: diff})
@staticmethod
def reindent(file, indentation):
indent, nextindent = 0, 0
for line_nr, line in enumerate(file, start=1):
indent = nextindent
indent, nextindent = MatlabIndentationBear.get_indent(line,
indent,
nextindent)
stripped = line.lstrip()
if stripped:
yield indent*indentation*' ' + stripped
else:
yield line
@staticmethod
def get_indent(line, indent, nextindent):
ctrlstart = r'\s*(function|if|while|for|switch)'
ctrlcont = r'\s*(elseif|else|case|catch|otherwise)'
ctrlend = r'\s*(end|endfunction|endif|endwhile|endfor|endswitch)'
if re.match(ctrlstart, line) is not None:
return indent, nextindent+1
elif re.match(ctrlcont, line) is not None:
return indent-1, nextindent
elif re.match(ctrlend, line) is not None:
return indent-1, nextindent-1
else:
return indent, indent
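# Worked example of the reindent() helper (editorial, not part of the bear itself): with
# indent_size=2 each open control keyword adds one level and 'end' closes it again:
#
#   lines = ['if x\n', 'y = 1;\n', 'end\n']
#   list(MatlabIndentationBear.reindent(lines, 2))
#   # -> ['if x\n', '  y = 1;\n', 'end\n']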
| 38.769231
| 79
| 0.599206
|
4e8d735c3fb9e5f02a33e77136bc4d6b884815d2
| 2,540
|
py
|
Python
|
artmap_fuzzy.py
|
DavidVinicius/artmap-fuzzy-tcc
|
1ff039ca2ade7a2a96137c75feaa9509e401e387
|
[
"MIT"
] | null | null | null |
artmap_fuzzy.py
|
DavidVinicius/artmap-fuzzy-tcc
|
1ff039ca2ade7a2a96137c75feaa9509e401e387
|
[
"MIT"
] | null | null | null |
artmap_fuzzy.py
|
DavidVinicius/artmap-fuzzy-tcc
|
1ff039ca2ade7a2a96137c75feaa9509e401e387
|
[
"MIT"
] | null | null | null |
import numpy as np
from src.utils.functions import *
from src.neural_networks.art_fuzzy import ARTFUZZY
A = np.array([
[0.25, 0.25],
[0.25, 0.75],
[0.75, 0.25],
[0.75, 0.75]
])
B = np.array([
[0.25],
[0.75],
[0.75],
[0.25]
])
AC = layerF0(A, 1)
BC = layerF0(B, 1)
WAB = np.ones([AC.shape[0], BC.shape[0]])
rhoA = 0.5
rhoB = 0.9
rhoAB = 0.6
ArtA = ARTFUZZY(AC, rho=rhoA)
ArtB = ARTFUZZY(BC, rho=rhoB)
categoriesA = ArtA.categories()
categoriesB = ArtB.categories()
for i in range(0, len(ArtB.I)):
championB = max(categoriesB)
championIndexB = categoriesB.index(championB)
print()
if ArtB.hadRessonance(ArtB.I[i], ArtB.W[championIndexB]):
ArtB.W[championIndexB] = ArtB.learn(ArtB.I[i], ArtB.W[championIndexB])
ArtB.activate(championIndexB)
ArtB.Js.append([i, championIndexB])
championA = max(categoriesA)
championIndexA = categoriesA.index(championA)
for j in range(0, len(ArtA.I)):
print(i,j)
if ArtA.hadRessonance(ArtA.I[j], ArtA.W[championIndexA]):
ArtA.activate(championIndexA)
ArtA.Js.append([j, championIndexA])
if hadRessonance(ArtB.Y[championIndexB], WAB[championIndexB], rhoAB):
print()
ArtA.W[championIndexA] = ArtA.learn(ArtA.I[j], ArtA.W[championIndexA])
WAB[championIndexA] = activate(WAB, championIndexB)
print(activate(WAB, championIndexB), championIndexA)
print(WAB)
break
else:
categoriesA[championIndexA] = 0
championA = max(categoriesA)
championIndexA = categoriesA.index(championA)
x = AND(ArtB.Y[championIndexB], WAB[championIndexB])
temp = (sum(x) / sum(ArtB.Y[championIndexB]))
ArtA._rho += 0.01
print(ArtA._rho)
else:
categoriesA[championIndexA] = 0
championA = max(categoriesA)
championIndexA = categoriesA.index(championA)
else:
categoriesB[championIndexB] = 0
| 27.608696
| 110
| 0.487008
|
53087f8a8ddf754046d208f79a9869511f596629
| 481
|
py
|
Python
|
Python/XGBoost/pipeline-dict-vectorizer.py
|
James-McNeill/Learning
|
3c4fe1a64240cdf5614db66082bd68a2f16d2afb
|
[
"MIT"
] | null | null | null |
Python/XGBoost/pipeline-dict-vectorizer.py
|
James-McNeill/Learning
|
3c4fe1a64240cdf5614db66082bd68a2f16d2afb
|
[
"MIT"
] | null | null | null |
Python/XGBoost/pipeline-dict-vectorizer.py
|
James-McNeill/Learning
|
3c4fe1a64240cdf5614db66082bd68a2f16d2afb
|
[
"MIT"
] | null | null | null |
# Aim is to combine the LabelEncoder and OneHotEncoder methods into one step
# Import DictVectorizer
from sklearn.feature_extraction import DictVectorizer
# Convert df into a dictionary: df_dict
df_dict = df.to_dict("records")
# Create the DictVectorizer object: dv
dv = DictVectorizer(sparse=False)
# Apply dv on df: df_encoded
df_encoded = dv.fit_transform(df_dict)
# Print the resulting first five rows
print(df_encoded[:5,:])
# Print the vocabulary
print(dv.vocabulary_)
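# Follow-up sketch (editorial; demo_df below is an illustrative frame, not the original df):
# DictVectorizer one-hot encodes string values and passes numeric columns through, which is
# why the single fit_transform above replaces the LabelEncoder + OneHotEncoder pair.
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
demo_df = pd.DataFrame({"color": ["red", "blue"], "size": [1, 2]})
demo_dv = DictVectorizer(sparse=False)
demo_encoded = demo_dv.fit_transform(demo_df.to_dict("records"))
# One binary column per (feature, string value) pair plus the numeric column
print(demo_dv.feature_names_)   # ['color=blue', 'color=red', 'size']
print(demo_encoded)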
| 24.05
| 76
| 0.787942
|
c8a3a37c77e77a6c95cd195a41b0811c67a74883
| 31,691
|
py
|
Python
|
skymodel/snr/SNR.py
|
GiovanniPiano/cta-gps-simulation-paper
|
90b9fbadf364d11cdc7b1a5bfea27bbeb2a28a83
|
[
"BSD-3-Clause"
] | null | null | null |
skymodel/snr/SNR.py
|
GiovanniPiano/cta-gps-simulation-paper
|
90b9fbadf364d11cdc7b1a5bfea27bbeb2a28a83
|
[
"BSD-3-Clause"
] | null | null | null |
skymodel/snr/SNR.py
|
GiovanniPiano/cta-gps-simulation-paper
|
90b9fbadf364d11cdc7b1a5bfea27bbeb2a28a83
|
[
"BSD-3-Clause"
] | null | null | null |
# Define the Supernova class and SNR class and connected procedures
import numpy as np
import matplotlib.pyplot as plt
import math
import pylab as pl
from matplotlib.ticker import ScalarFormatter
import matplotlib.ticker as mticker
import matplotlib.ticker as ticker
from itertools import zip_longest
import os
import csv
from random import choices
from matplotlib import rc
from astropy.io import ascii
from astropy.constants import c
import astropy.units as u
import naima
from naima.models import (ExponentialCutoffBrokenPowerLaw, Synchrotron,
InverseCompton,PionDecay, TableModel)
# CONSTANTS
definition_pevatron=500. #TeV
parsec=3.09*pow(10.,18.)
Msol=1.9*pow(10,33.)
E51=pow(10,51.)
masseproton=1.4*1.67*pow(10,-24.)
c=3.*pow(10,10) # // cm/s
mc2_electron_TeV=511.*pow(10, -9.)
mc2_electron_erg=511.*1.602*pow(10, -9.)
k_boltzmann_erg=1.38*pow(10., -16) #; // erg.
masseproton_TeV= 0.938*pow(10.,-3.) # TeV
sigma_thomson=6.6524*pow(10., -25.) # in cm2
kyear_sec=np.pi*pow(10, 10.)
mu= 1.36
gamma_ad=4./3.
beta=6*(gamma_ad-1)/(gamma_ad+1)
sigma=4.
r00=8.5 # in kiloparsec
distance_SS_GC=8.5 # distance to galactic center in kiloparsec
age_sample=10. # kyear 20-40 enough to study Pevatrons
#--------------------------------------------------------
# GAS Distribution functions (Shibata et al. 2010)
def rho_H1 ( r): # / r in kiloparsec , rho_H1 in 10^20 H atoms.cm^-3
return np.exp(- ( 3.862+7.903*pow(10,-1.)*r -9.426*pow(10,-2.)*np.log10(r) - 4.261*pow(r,1./2.) ))
def rho_H2 ( r): #// r in kiloparsec , rho_H1 in 10^20 H atoms.cm^-3
return np.exp(- (1.848+8.339*pow(10,-1.)*r -5.560*np.log10(r) + 2.405*pow(10,-2.)*pow(r,2.)))
def XI_H1 ( r, z ):
return 1/(0.065*np.sqrt(np.pi)+0.160)*(0.4*np.exp(-pow(z/0.12,2.))+0.2*np.exp(-pow(z/0.35, 2.))+0.4*np.exp(-np.fabs(z)/0.40))
def z0 ( r):
return 0.4*np.cosh(2*r/(3*r00))
def XI_H2 ( r, z ):
return 1/(0.036*np.sqrt(np.pi)+0.2*z0(r))*( np.exp(-pow(z/0.071, 2.))+ 0.2*(np.fabs(z)/z0(r))*np.exp(-np.fabs(z)/z0(r)))
#nH1, nH2
def nH1 ( r, z): # // r and z are in kilo parsecs
return 0.57*XI_H1(r, z)/XI_H1(r00, 0)*rho_H1(r)/rho_H1(r00)
def nH2 ( r, z): # // r and z are in kilo parsecs
return 0.53*XI_H2(r, z)/XI_H2(r00, 0)*rho_H2(r)/rho_H2(r00)
def nH ( r, z):
return nH1(r,z)+2*nH2(r, z)
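# Worked check (follows directly from the definitions above): at the solar circle
# (r = r00 = 8.5, z = 0) the XI and rho ratios equal 1 by construction, so
# nH1 = 0.57, nH2 = 0.53 and nH = nH1 + 2*nH2 = 1.63 H atoms cm^-3.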
#--------------------------------------------------------
#preliminary functions for type 1 and type 2 dynamics: all dynamics are from Ptuskin & Zirakashvili 2005
def alpha1_def ( Mdot, uw6):
return masseproton*((Mdot*pow(10, -5)*Msol/(kyear_sec*pow(10, -3.)))/(4*np.pi*1.4*masseproton*uw6*pow(10, 6)))
def rho1_r (self, r ):
return self.alpha1*pow(r*parsec, -2.)
def rho_r (self, r):
if (r<self.r1):
return self.rho1_r(r)
else:
if (r<self.r2):
return self.rho2
else :
if (r<self.r2+0.5):
return (self.rho0-self.rho2)*(self.r2-self.r1)/0.5
else :
return float(self.rho0)
def INT1 ( self, r ):
return self.Mej*Msol*pow(r*parsec, self.beta)/beta+4*np.pi*self.alpha1*pow(r*parsec,self.beta+1.)/(self.beta+1.)
def INT2 (self, r ):
return self.INT1(self.r1)+ (self.Mej*Msol+4*np.pi*self.alpha1*self.r1*parsec-4./3.*np.pi*self.rho2*pow(self.r1*parsec,3.))* ( pow(r*parsec,self.beta)- pow(self.r1*parsec, self.beta) )/self.beta +4./3.*np.pi*self.rho2*( pow(r*parsec,self.beta+3) -pow(self.r1*parsec, self.beta+3))/(self.beta+3)
def INT0 (self, r):
return self.INT2(self.r2)+ (self.Mej*Msol+4*np.pi*self.alpha1*self.r1*parsec+4./3.*np.pi*self.rho2*(pow(self.r2*parsec,3.)-pow(self.r1*parsec,3.))-4./3.*np.pi*self.rho0*pow(self.r2*parsec,3))*(pow(r*parsec,self.beta)-pow(self.r2*parsec,self.beta))/self.beta + 4./3.*self.rho0*np.pi*(pow(r*parsec,self.beta+3.)-pow(self.r2*parsec,self.beta+3.))/(self.beta+3.)
def INT (self, r):
if (r<self.r1):
return self.INT1(r)
else:
if (r<self.r2):
return self.INT2(r)
else :
return self.INT0(r)
def B ( self, r):
return (3.*(self.gamma_ad-1)*(self.gamma_ad+1.)*self.E_SN*E51/(pow(self.M(r),2. )*pow(r*parsec,self.beta)))
def Ushock2_r(self,r) : # input in parsec, output in cm/s
return pow(self.B(r)*self.INT(r), 1./2.)
def Ushock1_r(self ,r):
tchange=260.*pow(self.Mej/1.4,5./6.)*pow(self.E_SN,-1./2.)*pow(self.n0,-1./3.)*pow(10,-3.)
rchange= 5.3*pow((self.E_SN/(self.n0*self.Mej)),1./7. )*pow(tchange,4./7.)
if ( r<rchange):
return pow(10,5.)* 2.7*pow(10,3.)*pow((5.3/r),3./4.)*pow((self.E_SN/(self.n0*self.Mej)),1./4.)
else:
return pow(10,5.)*1.7*pow(10,3.)*pow((4.3/r),3./2.)*pow((self.E_SN/self.n0),1./2.)
def t_r (self, r ): # input in parsec, output in kyear
N=100
RB=np.linspace(0.001,r,N) # in parsec
tb=0.
for i in range (1,N):
tb=tb+(1./self.Ushock2_r(RB[i])+1./self.Ushock2_r(RB[i-1]))*(RB[i]-RB[i-1])/2.*parsec
return tb/(3.*pow(10.,10.))
def Rshock_type2 (self, t ):
step_integration1=200 # 200 ok value
RB=np.logspace(-2.,2.,step_integration1) #maximum distance 100 pc
temp=0.
p=0
while(temp < t*3.*pow(10, 10.)and p<step_integration1 ):
temp=temp+(1./self.Ushock2_r( RB[p]))*(RB[p]-RB[p-1])*parsec
p=p+1
return (RB[p-1]+RB[p])/2.
#--------------------------------------------------------
# PRELIMINARY FUNCTIONS :
def M1( self, r):
return self.Mej*Msol+4*3.1415*self.alpha1*r*parsec
def M2( self, r):
return self.M1(self.r1)+4./3.*3.1415*self.rho2*(pow(r*parsec,3.)-pow(self.r1*parsec,3.))
def M0( self, r):
return self.M2(self.r2)+4./3.*3.1415*self.rho0*(pow(r*parsec,3.)-pow(self.r2*parsec,3.))
def M (self, r):
if (r<self.r1):
return self.M1(r)
else :
if (r<self.r2):
return self.M2(r)
else :
return self.M0(r)
def Tequalitymass2(self):
rr=0.0001
rgrid=0.01 # 0.00005
while ( self.M (rr)/Msol-self.Mej <self.Mej ):
rr=rr+rgrid
if (rr>= self.r1):
rr=self.r1 # to make sure the knee is not in the bubble, otherwise Emax >10**5 at early stage
return self.t_r(rr)
#--------------------------------------------------------
# IN SNR PROCEDURE, RSHOCK AND USHOCK DEFINITIONS
def Rshock_1(self,t):
tchange=260.*pow(self.Mej/1.4,5./6.)*pow(self.E_SN,-1./2.)* pow(self.n0,-1./3.)*pow(10,-3.)
if (t<tchange):
return 5.3*pow( pow(self.E_SN,2.)/(self.n0*self.Mej), 1./7.)*pow(t,4./7.)
else :
return 4.3*pow(self.E_SN/self.n0,1./5.)*pow(t,2./5.)*pow(1.- 0.06*pow(self.Mej,5./6.)/(pow(self.E_SN,1./2.)*pow(self.n0,1./3.)*t) ,2./5.)
def Ushock_1(self,t): # returned value in cm/s
tchange=260.*pow(self.Mej/1.4,5./6.)*pow(self.E_SN,-1./2.)*pow(self.n0,-1./3.)*pow(10,-3.);
if (t<tchange):
return pow(10,5.)*2.7*pow(10,3.)*pow( pow(self.E_SN,2.)/(self.n0*self.Mej), 1./7.)*pow(t,-3./7.)
else :
return pow(10,5.)*1.7*pow(10,3.)*pow(self.E_SN/self.n0,1./5.)*pow(t,-3./5.)*pow(1- 0.06*pow(self.Mej,5./6.)/(pow(self.E_SN,1./2.)*pow(self.n0,1./3.)*t) ,-3./5.)
def Rshock_2(self,t):
r_trans=self.Mej*Msol/(4.*np.pi*self.alpha1*parsec)
# t_trans=0.1;//pow(r_trans/7.7,8./7.)*pow((pow(E_SN,7./2.)*uw6/(Mdot*pow(Mej,5./2.))),-1./7.);
t_trans=self.Tequalitymass2()
Norm_FE=self.Rshock_type2 (t_trans )/pow(t_trans,7./8.)
if (t<t_trans):
return Norm_FE*pow(t,7./8.)
else :
return self.Rshock_type2 ( t )
def Ushock_2(self, t):
r_trans=self.Mej*Msol/(4.*np.pi*self.alpha1*parsec)
t_trans=self.Tequalitymass2()
Norm_FE=self.Ushock2_r (self.Rshock_2(t_trans))*pow(t_trans,1./8.)
if (t<t_trans and t>0.):
return Norm_FE*pow(t,-1./8.)
else :
return self.Ushock2_r ( self.Rshock_2(t))
def Rshock_t(self,t):
if (self.type==1):
return self.Rshock_1(t)
else :
return self.Rshock_2(t)
def Ushock_t(self,t):
if (self.type==1):
return self.Ushock_1(t)
else :
return self.Ushock_2(t)
def Transitiontime1(self):
return 260.*pow(self.Mej/1.4,5./6.)*pow(self.E_SN,-1./2.)*pow(self.n0,-1./3.)*pow(10,-3.)
def Transitiontime2(self):
return self.Tequalitymass2()
def assign_type (self):
a=np.random.uniform(0.,1.)
if ( a<0.32 ) :
self.type=1
else :
if (a<0.76) :
self.type=2
else :
if(a<0.98):
self.type=3
else :
self.type=4
def assign_age(self,age_sample):
self.age= np.random.uniform(0.,1.)*age_sample
self.TIME=np.linspace(self.age,self.age,1)
#--------------------------------------------------------
# DISTRIBUTION TYPE Ia - Bad but OK, should be improved soon
DISTRIB_TYPE_IA=[2.21*pow(10, -2.),2.32*pow(10, -2.),2.38*pow(10, -2),2.27*pow(10, -2),2.12*pow(10, -2),1.89*pow(10, -2),1.68*pow(10, -2),1.49*pow(10, -2),1.33*pow(10, -2), 1.21*pow(10, -2),1.07*pow(10, -2),9.54*pow(10, -3), 8.48*pow(10, -3), 7.36*pow(10, -3), 6.55*pow(10, -3), 5.68*pow(10, -3), 4.09*pow(10, -3), 2.44*pow(10, -3), 1.17*pow(10, -3), 6.99*pow(10, -4)]
DISTANCE_DISTRIB_TYPE_IA=[2.02,2.73, 3.42,4.13,4.77, 5.39, 5.95, 6.42, 6.86, 7.30, 7.65, 8.04, 8.39, 8.75, 9.12, 9.52, 10.4, 11.7, 13.4,14.6]
def distribution_type_Ia ( r): #r in kiloparsec 5.55= to normalize to 1.
if (r<DISTANCE_DISTRIB_TYPE_IA[0]):
return 5.52*0.042/DISTANCE_DISTRIB_TYPE_IA[0] # 0.042 = to obtain the ratio 0.12/0.4 between the bulge and the whole galaxy
else :
if (r>DISTANCE_DISTRIB_TYPE_IA[19]):
return 0.
else :
j=1
while (r>DISTANCE_DISTRIB_TYPE_IA[j]):
j=j+1 # when the loop stops, r is between DIST[j-1] and DIST[j]
return 5.52*DISTRIB_TYPE_IA[j-1]+(r-DISTANCE_DISTRIB_TYPE_IA[j-1])/(DISTANCE_DISTRIB_TYPE_IA[j]-DISTANCE_DISTRIB_TYPE_IA[j-1])*(DISTRIB_TYPE_IA[j]-DISTRIB_TYPE_IA[j-1])
def integrate_distribution_type_Ia ( r):
j=0
step_int=100
R_INTEGRATION=np.linspace(0,r,step_int)
integrale=0.
for j in range (1,len(R_INTEGRATION)):
integrale=integrale+distribution_type_Ia((R_INTEGRATION[j]+R_INTEGRATION[j-1])/2.)*(R_INTEGRATION[j]-R_INTEGRATION[j-1])
return integrale
step_integration_radial_distribution_typeIa=100
RADIAL_SNR_DISTRIBUTION_TYPEIA=np.linspace(0,17,step_integration_radial_distribution_typeIa)
def place_SN_typeIa (self):
q=0
a=np.random.uniform(0.,1.)
while (integrate_distribution_type_Ia(RADIAL_SNR_DISTRIBUTION_TYPEIA[q])< a and q<step_integration_radial_distribution_typeIa-1):
q=q+1
self.pos_r=(RADIAL_SNR_DISTRIBUTION_TYPEIA[q]+RADIAL_SNR_DISTRIBUTION_TYPEIA[q-1])/2.
def place_SN_theta_typeIa (self):
self.pos_theta=np.random.uniform(0.,2.*np.pi)
def place_SN_z_typeIa (self):
size=1000
Z=np.linspace(-1.5,1.5,size)
I=np.zeros(size)
for i in range (1,size):
I[i]=I[i-1]+nH1(self.pos_r, (Z[i-1]+Z[i])/2.)*(Z[i]-Z[i-1])
p=0
a=np.random.uniform(0.,I[size-1])
while (I[p]<a and p<len(I)-1):
p=p+1
self.pos_z=(Z[p]+Z[p+1])/2.
#--------------------------------------------------------
# DISTRIBUTION TYPE II - Bad but OK, should be improved
rho0_SNR=1.96 # normalization of rho_SNR
r0=17.2 # //kiloparsec
beta0=0.13 # kiloparsec
theta0=0.08
rhomax_SNR=1.8506
def Rrho_SNR(r ): # r input in kiloparsec
if (r<=r0*(1-theta0/np.pi)):
return r*rho0_SNR*np.sin(np.pi*(r/r0)+theta0)*np.exp(-r*beta0)
else :
return 0.
number_arm=4
K_galaxy = [4.25,4.25,4.89,4.89]
r0_galaxy=[3.48,3.48,4.90,4.90]
theta0_galaxy=[0.,np.pi,2.52,-0.62]
step_integration_radial_distribution=100
RADIAL_SNR_DISTRIBUTION=np.linspace(0.,17., step_integration_radial_distribution)
INTEGRALE_SNR_RADIAL_DISTRIBUTION=np.zeros(step_integration_radial_distribution)
for i in range (1,step_integration_radial_distribution):
INTEGRALE_SNR_RADIAL_DISTRIBUTION[i]=INTEGRALE_SNR_RADIAL_DISTRIBUTION[i-1]+Rrho_SNR((RADIAL_SNR_DISTRIBUTION[i]+RADIAL_SNR_DISTRIBUTION[i-1])/2.)*(RADIAL_SNR_DISTRIBUTION[i]-RADIAL_SNR_DISTRIBUTION[i-1])
def place_SN_typeII (self):
a=np.random.uniform(0., INTEGRALE_SNR_RADIAL_DISTRIBUTION[step_integration_radial_distribution-1])
q=0
while (INTEGRALE_SNR_RADIAL_DISTRIBUTION[q]<a):
q=q+1
self.pos_r=(RADIAL_SNR_DISTRIBUTION[q]+RADIAL_SNR_DISTRIBUTION[q-1])/2.
def place_SN_theta_typeII (self):
p=np.random.randint(0, 3)
theta_correction=np.random.uniform(0, 2*np.pi)
theta=(K_galaxy[p]*np.log(self.pos_r/r0_galaxy[p])+theta0_galaxy[p]+theta_correction*np.exp(-0.35*self.pos_r))
# Correct position with respect to R and theta
self.pos_r=np.random.normal(self.pos_r, 0.07*self.pos_r)
while (theta>2*np.pi):
theta=theta-2*np.pi
while (theta<0):
theta=theta+2*np.pi
self.pos_theta=theta
def place_SN_z_typeII (self):
size=1000
Z=np.linspace(-1.5,1.5,size)
I=np.zeros(size)
for i in range (1,size):
I[i]=I[i-1]+nH2(self.pos_r, (Z[i-1]+Z[i])/2.)*(Z[i]-Z[i-1])
p=0
a=np.random.uniform(0.,I[size-1])
while (I[p]<a and p<len(I)-2):
p=p+1
self.pos_z=(Z[p]+Z[p+1])/2.
def place_supernova(self):
if(self.type==1):
self.place_SN_typeIa()
self.place_SN_theta_typeIa ()
self.place_SN_z_typeIa ()
else :
self.place_SN_typeII()
self.place_SN_theta_typeII ()
self.place_SN_z_typeII()
def typical_associated_parameters(self):
if (self.type==1) :
self.Mej=1.4
self.E_SN=1.
else:
if (self.type==2):
self.Mej=8.
self.E_SN=1.
else:
if(self.type==3):
self.Mej=2.
self.E_SN=1.
else:
if(self.type==4):
self.Mej=1.
self.E_SN=3.
def GG (lambda1, x ) : # real-valued function; the variable x must be between 0 and 1
v= -lambda1
s=0
p=0 # counter variable
while (s<x):
p=p+1
v=v+np.log(lambda1)-np.log(p)
s=s+np.exp(v)
# the loop has stopped: the variable is therefore between p-1 and p
return p-1
def draw_number_SNRs_in_Galaxy(age_sample):
SN_rate=3.
mean=SN_rate*age_sample*10. #mean number of SN we want
return GG(mean,np.random.uniform(0.,1.))
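# Editorial note: GG(lambda1, x) walks the cumulative Poisson distribution of mean lambda1
# until it exceeds x, so draw_number_SNRs_in_Galaxy is essentially a hand-rolled Poisson
# draw. With the large means used here (SN_rate*age_sample*10 ~ 300) an equivalent sketch,
# assuming numpy's generator is acceptable, would be:
#
#   def draw_number_SNRs_in_Galaxy(age_sample):
#       SN_rate = 3.
#       return np.random.poisson(SN_rate * age_sample * 10.)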
def density_around(self):
self.tau=0.
r=self.pos_r
z=self.pos_z
if (self.type==1):
self.n0=nH(r,z)
self.rho0=self.n0*(1.67*pow(10,-24.))
else :
self.alpha1=alpha1_def(self.Mdot,self.uw6)
self.n0=nH(r, z)
self.rho0=self.n0*(1.67*pow(10,-24.))
self.n2=0.01*pow(pow(self.n0,19.)*pow(self.t6,-22.),1/35.)
self.rho2= self.n2*(1.67*pow(10,-24.))
self.Tb=1.6*pow(10., 6.)*pow(self.n0, 2/35.)*pow(self.V_2000*self.Mdot/10, 8/35.)*pow(self.t6,-6/35.)
self.r1=(self.alpha1*pow(self.uw6*pow(10, 6),2.))/(4*np.pi*self.n2*self.Tb*k_boltzmann_erg*pow(parsec, 2.))
self.r2 = 28.*pow(self.L36/(mu*self.n0) ,1./5.)*pow(self.t6,3./5.) # 35 ?
def calculate_final_evolution (self):
if(self.type==1):
self.Rshock=self.Rshock_1(self.age)
self.Ushock=self.Ushock_1(self.age)
else :
self.Rshock=self.Rshock_2(self.age)
self.Ushock=self.Ushock_2(self.age)
def set_factor_Emax(self,KNEE):
if (self.type==1) :
self.Emax_factor= KNEE/(self.Emax_t(self.Transitiontime1()))
else :
self.Emax_factor= KNEE/(self.Emax_t(self.Transitiontime2()))
#------ DISTANCE FUNCTIONS from NOVA
def distance_plan (self):
X=np.sin(self.pos_theta)/(distance_SS_GC/self.pos_r-np.cos(self.pos_theta))
d1=np.absolute(1/(np.sqrt(1+X**2.))*(-self.pos_r*np.cos(self.pos_theta) + self.pos_r*np.sin(self.pos_theta)*X + distance_SS_GC ) )
return d1
def b_calculated(self):
return np.arctan(self.pos_z/self.distance_plan())*(180/np.pi)
def l_calculated(self):
return np.sign(self.pos_theta)*np.arccos((-self.pos_r*np.cos(self.pos_theta)+distance_SS_GC)/self.distance_plan())*(180/np.pi)
def distance (self):
X=np.sin(self.pos_theta)/(distance_SS_GC/self.pos_r-np.cos(self.pos_theta))
d1=np.absolute(1/(np.sqrt(1+X**2.))*(-self.pos_r*np.cos(self.pos_theta) + self.pos_r*np.sin(self.pos_theta)*X + distance_SS_GC ) )
self.dist=np.sqrt(d1**2.+self.pos_z**2.)
return self.dist
# ---------------------------------------------------------------------------#
# EMAX FUNCTIONS to match the knee and corresponding amplified field
def Emax_t (self, t):
self.Emax_mem=self.Emax_factor*self.Emax_OK2015(t)
return self.Emax_mem
def B_amplified_OK2015 (self, t):
xi_B_correction_factor=1.
self.M_A=23.
if(self.type==1):
VA0=xi_B_correction_factor*self.B0/(4*np.pi*self.rho0)**(0.5)
return self.B0*self.sigma*np.sqrt(pow(self.Ushock_t(t),2.)/pow((self.M_A)*VA0*pow(10, -6.), 2.)+1.)
else :
VA0=xi_B_correction_factor*self.B0/(4*np.pi*self.rho_r(self.Rshock_t(t)))**(0.5)
return self.B0*self.sigma*np.sqrt(pow(self.Ushock_t(t),2.)/pow((self.M_A)*VA0*pow(10, -6.), 2.)+1.)
def B_amplified_knee ( self, t):
calc=self.Emax_factor*self.B_amplified_OK2015(t)
return max(self.sigma*self.B0, calc)
def Emax_OK2015 (self, t): # // in TeV
if (self.type==1):
self.delta_Emax=-np.log10(self.EMAX_B_dependant_t_OK2015(t+2.)/self.EMAX_B_dependant_t_OK2015(t))/np.log10(self.Rshock_t(t+2.)/self.Rshock_t(t))
A_test=self.EMAX_B_dependant_t_OK2015(t)/pow(self.Rshock_t(t),-self.delta_Emax)
return A_test*pow(self.Rshock_t(t),-self.delta_Emax)
else :
self.delta_Emax=2.;
return self.EMAX_B_dependant_t_OK2015(t)
def EMAX_B_dependant_t_OK2015 ( self, t):
xi_B_correction_factor=1.
if(self.type==1):
VA0=xi_B_correction_factor*self.B0/(4*np.pi*self.rho0)**(0.5)
U1=self.Ushock_t(t)
R1=self.Rshock_t(t)
return pow(10, -12.)*3*pow(10, -6.)*pow(10, -8.)*self.chi*R1*parsec*U1*(self.B_amplified_OK2015(t)/self.sigma)*1./(pow(pow((VA0*self.M_A*pow(10, -6.))/U1, 2.)+1, 1.5))
else :
R2=self.Rshock_t(t)
VA0=xi_B_correction_factor*self.B0/(4*np.pi*self.rho_r(R2))**(0.5)
# print ('JOJOOJJJO' )
# print('self.rho_r(R2)=', self.rho_r(R2), 'self.rho0', self.rho0, ' R2 =', R2, ' t = ', t, ' r2 = ', self.r2)
U2=self.Ushock_t(t)
return pow(10, -12.)*3*pow(10, -6.)*pow(10, -8.)*self.chi*R2*parsec*U2*(self.B_amplified_OK2015(t)/self.sigma)*1./(pow(pow((VA0*self.M_A*pow(10, -6.))/U2, 2.)+1, 1.5))
# ---------------------------------------------------------------------------#
# EMAX electrons
def Emax_electron_vannoni_time (self, t): # // in TeV , B1 in microgauss
rtot=6. # 5.2
B1=self.B0 #; // B0 // 3 ?
beta_B=1./(pow(2*pow(rtot,2)/(3)+1/3,0.5)) # // compression ratio of the B field
K_adjust=1.
B2=self.B_amplified_knee(t) # // double B2=etha_elec*B_volk(tage);
Ush=self.Ushock_t(t)
if (B1<5.): # B1 < 10 formerly
E_temp= mc2_electron_TeV*pow(((1-1./rtot)*pow(Ush, 2.)*900./(K_adjust) )/( ( ((B1*pow(10, -6.))/(8*np.pi)+self.Urad/(B1*pow(10, -6.)) +rtot*B2*pow(10, -6.))/(8*np.pi)+self.Urad/(B2*pow(10, -6.)))*4.*sigma_thomson*pow(c, 2.)*6.241*pow(10, 11.)),1./2.)
else :
E_temp= mc2_electron_TeV*pow(((1-1./rtot)*pow(Ush, 2.)*8*np.pi*900./(K_adjust) )/( ( B1+rtot*B2)*pow(10, -6.)*4.*sigma_thomson*pow(c,2.)*6.241*pow(10,11.)),1./2.)
# E_temp2=0.05*pow(10, -12.)*(900.)*Ush1*B1*pow(10, -6.)*self.Rshock_t(t)*parsec/c # if tacc = tage
self.Emax_electron_vannoni_time_mem=min(self.Emax_t(t), E_temp)
return self.Emax_electron_vannoni_time_mem
def Estar_electron_time ( self, t): # // E in TeV, B in MicroGauss
B1=self.B0
expression = 0.624*((pow(mc2_electron_erg,2.))/((4./3.)*sigma_thomson*c*(pow(self.B0*pow(10, -6.),2.)/(8*np.pi))))*(1./(t*kyear_sec)-self.Ushock_t(t)/(self.Rshock_t(t)*parsec))
self.Estar_electron_time_mem=max(0.001, expression)
return self.Estar_electron_time_mem
# ---------------------------------------------------------------------------#
# NORMALIZATION FOR THE GAMMA RAYS FROM SNR
def A( self, r,t):
a=2.-self.alpha
if (self.type==1):
RR=self.Rshock_t(t)
return ( 3.*self.eta*a*self.rho0*0.624*pow(self.Ushock1_r(RR*pow(r/RR, self.sigma)),2.)*pow(r/RR , (1-self.sigma)*(-4+a) ) )/((pow(r/RR,-a*self.delta_Emax*self.sigma)*pow(self.Emax_t(t),a)-pow(masseproton_TeV,a)))
else :
RR=self.Rshock_t(t)
return ( 3.*self.eta*a*self.rho_r(pow(r/RR,self.sigma-1.)*r)*0.624*pow(self.Ushock2_r(RR*pow(r/RR, self.sigma)),2.)*pow(r/RR , (1-self.sigma)*(-4+a)))/((pow(r/RR,-a*self.delta_Emax*self.sigma)*pow(self.Emax_t(t),a)-pow(masseproton_TeV,a )))
def density_inside ( self, r,t): # r in parsec, t in kyear
if (self.type==1):
return self.rho0*self.sigma*pow((r/self.Rshock_t(t)),3*(self.sigma-1))
else :
RR= self.Rshock_t(t)
return self.sigma*self.rho_r(pow(r,self.sigma)/pow(RR,self.sigma-1.))*pow(r/RR,3*(self.sigma-1.))
def Norm_hadronic (self, t):
N=100
Rsh=self.Rshock_t(t)
R=np.linspace(0.,Rsh,N)
norm=0.
for i in range (0,len(R)-1):
norm=norm+ pow(((R[i]+R[i+1])/2.)*parsec,2.)*self.density_inside(((R[i]+R[i+1])/2.), self.age)*self.A(((R[i]+R[i+1])/2.),t)*(R[i+1]-R[i])*parsec/masseproton
self.Norm_hadronic_mem =4*np.pi*norm
return self.Norm_hadronic_mem
#--------------------------------------------------------
# Starting here we define functions outside the class
#calculate GAMMA - with the help of NAIMA
def spectrum_proton(self,time):
return naima.models.ExponentialCutoffPowerLaw(self.Norm_hadronic(time)/u.erg,1*u.TeV,self.alpha,self.Emax_t(time) *u.TeV,beta=1)
def spectrum_proton_old_school (self, E,time): # this is needed for the secondaries
return self.Norm_hadronic(time)/(0.624)*(E/1.)**(-self.alpha)*np.exp(-(E/self.Emax_t(time)))
def spectrum_electron(self,time):
return naima.models.ExponentialCutoffBrokenPowerLaw(self.Kep*self.Norm_hadronic(time)/u.erg,1*u.TeV,self.Estar_electron_time(time) *u.TeV,self.alpha,-1,self.Emax_electron_vannoni_time(time)*u.TeV,beta=1)
def diff_spectrum_hadronic(self,time):
self.dist=self.distance()
PROTONS=self.spectrum_proton(time)
PIONS_FROM_SHELL=PionDecay(PROTONS, nh=self.n0 * u.cm** -3)
GAMMAS=PIONS_FROM_SHELL.sed(self.ENERGY,self.dist * u.kpc)
# GAMMAS.to(u.eV/(u.cm **2 * u.s))
GAMMAS=GAMMAS.to(u.TeV/(u.cm**2 *u.s))
return GAMMAS
def diff_spectrum_leptonic(self,time):
self.dist=self.distance()
ELECTRONS=self.spectrum_electron(time)
IC = InverseCompton(ELECTRONS, seed_photon_fields=['CMB'])
GAMMAS=IC.sed(self.ENERGY,self.dist * u.kpc)
GAMMAS_TeV=GAMMAS.to(u.TeV/(u.cm**2 *u.s))
return GAMMAS_TeV
def diff_spectrum_total (self,time):
GAMMA_TOT=self.diff_spectrum_hadronic(time)# +self.diff_spectrum_leptonic(time)
return GAMMA_TOT
def calculate_alpha_gamma (self,E):
p=1
E_LOCAL=np.array(self.ENERGY)
while (E>E_LOCAL[p] and p<len(E_LOCAL)-1):
p=p+1
p_up=1
while (10*E>E_LOCAL[p_up] and p_up<len(E_LOCAL)-1):
p_up=p_up+1
GAMMA=np.array(self.diff_spectrum_total()/self.ENERGY[p]**2)
self.alpha_gamma_memory=-(np.log10(GAMMA[p_up])-np.log10(GAMMA[p]))/(np.log10(E_LOCAL[p_up]-E_LOCAL[p]))
return self.alpha_gamma_memory
def calculate_diff_spectrum_TIME (self):
self.LGAMMA_DIFF_T=np.zeros((len(self.TIME), len(self.ENERGY)))
for t in range (0,len(self.TIME)):
SPEC=np.array(self.diff_spectrum_total(self.TIME[t]))
for i in range (0,len(self.ENERGY)):
self.LGAMMA_DIFF_T[t][i]=SPEC[i]
#--------------------------------------------------------#
# DEFINING CLASSES
class SNR:
# SN=Supernova()
age=1.
pos_r=1.
pos_z=1.
pos_theta=1.
type=1
Rshock=1.
ushock=1.
alpha=1. # proton spectrum index
Kep=1.
dist=1.
size=1.
Mej=1.
Mdot=1. # Mass loss rate wind
E_SN=1.
r1=1.
r2=1.
rho0=1.
rho2=1.
alpha1=1. #coefficient of the wind
t6=1.
L36=1. # kinetic energy of the wind
uw6=1. # wind velocity
Tb=1. # Temperature in the bubble in K.
V_2000=1. # // speed in 2000 km/s units
tau=0. # absorption
Urad=0.25*1.602*pow(10, -12) # erg.cm-3
M_A=23.
n0=1.
B0=3.
gamma_ad=4./3.
beta=6*(gamma_ad-1)/(gamma_ad+1)
age=1.
sigma=4.
chi=0.1 # fraction of diffusion length before escape
eta=0.1 #efficiency of particle acceleration at the shock
Emax=1.
delta_Emax=2.
Emax_factor=1. # to match the knee
Emax_mem=1.
Estar_electron_time_mem=1.
Emax_electron_vannoni_time_mem=1.
Norm_hadronic_mem=1.
distance=distance
distance_plan=distance_plan
b_calculated=b_calculated
l_calculated=l_calculated
# Methods used to parametrize the SNRs
Rshock_1=Rshock_1
Rshock_2=Rshock_2
Ushock_1=Ushock_1
Ushock_2=Ushock_2
Rshock_type2=Rshock_type2
Ushock2_r=Ushock2_r
Ushock1_r=Ushock1_r
Rshock_t=Rshock_t
Ushock_t=Ushock_t
B=B
Tequalitymass2=Tequalitymass2
rho0=rho0
rho_r=rho_r
rho1_r=rho1_r
M=M
M0=M0
M1=M1
M2=M2
t_r=t_r
Transitiontime1=Transitiontime1
Transitiontime2=Transitiontime2
Emax_t=Emax_t
Emax_OK2015=Emax_OK2015
B_amplified_OK2015=B_amplified_OK2015
B_amplified_knee=B_amplified_knee
EMAX_B_dependant_t_OK2015=EMAX_B_dependant_t_OK2015
INT=INT
INT0=INT0
INT1=INT1
INT2=INT2
set_factor_Emax=set_factor_Emax
assign_type=assign_type
assign_age=assign_age
place_supernova=place_supernova
typical_associated_parameters=typical_associated_parameters
place_SN_typeIa=place_SN_typeIa
place_SN_theta_typeIa=place_SN_theta_typeIa
place_SN_z_typeIa=place_SN_z_typeIa
place_SN_typeII=place_SN_typeII
place_SN_theta_typeII=place_SN_theta_typeII
place_SN_z_typeII=place_SN_z_typeII
calculate_final_evolution=calculate_final_evolution
density_around=density_around
# Arrays for spectra
ENERGY=np.logspace(-1,4,40)* u.TeV
#for a given time
GAMMAS_H=np.zeros(len(ENERGY)) * u.TeV/(u.cm**2 *u.s)
GAMMAS_L=np.zeros(len(ENERGY)) * u.TeV/(u.cm**2 *u.s)
integrated_hadronic_memory=1/(u.cm**2 * u.s )
integrated_leptonic_memory=1/(u.cm**2 * u.s )
integrated_memory_total=1.
# ---------------------------------------------------------------------------#
# NORMALIZATION FOR THE GAMMA RAYS FROM SNR
time_step=0.1 # kiloyear
number=int((age)/time_step)
TIME=np.linspace(1.,1.,1)
LGAMMA_HADRONIC_T=np.zeros((len(TIME), len(ENERGY)))
LGAMMA_LEPTONIC_T=np.zeros((len(TIME), len(ENERGY)))
LGAMMA_DIFF_T=np.zeros((len(TIME), len(ENERGY)))
Emax_electron_vannoni_time=Emax_electron_vannoni_time
Estar_electron_time=Estar_electron_time
A=A
density_inside=density_inside
Norm_hadronic=Norm_hadronic
spectrum_proton=spectrum_proton
spectrum_proton_old_school=spectrum_proton_old_school
spectrum_electron=spectrum_electron
diff_spectrum_hadronic= diff_spectrum_hadronic
diff_spectrum_leptonic=diff_spectrum_leptonic
diff_spectrum_total=diff_spectrum_total
calculate_alpha_gamma=calculate_alpha_gamma
calculate_diff_spectrum_TIME=calculate_diff_spectrum_TIME
# calculate_integrated_spectrum_SGSO is not defined anywhere in this file; binding it here
# would raise a NameError when the class body is evaluated, so the assignment is disabled.
# calculate_integrated_spectrum_SGSO=calculate_integrated_spectrum_SGSO
def one_realization_only_pevatrons (a, Kep, D,eta, KNEE):
N=draw_number_SNRs_in_Galaxy(age_sample)
print('Number of simulated objects= ', N )
LIST_SNR=[]
for i in range (0,N):
print( 'realisation i/N=', i*1./N*100,'%')
SNR_temp=SNR()
SNR_temp.assign_type()
SNR_temp.place_supernova()
SNR_temp.assign_age(age_sample)
SNR_temp.typical_associated_parameters()
SNR_temp.density_around()
SNR_temp.calculate_final_evolution()
SNR_temp.set_factor_Emax(KNEE)
SNR_temp.alpha=a
SNR_temp.Kep=Kep
SNR_temp.distance()
SNR_temp.size=2*SNR_temp.Rshock/(SNR_temp.dist*pow(10., 3.))*3437.75
#### Calculating the gammas from the SNR :
SNR_temp.eta=eta
# if (SNR_temp.Emax_t(SNR_temp.age)> definition_pevatron):
# print( ' ------------------------')
# print (' New pevatron ' )
SNR_temp.calculate_diff_spectrum_TIME()
LIST_SNR.append(SNR_temp)
return LIST_SNR
def many_realizations (a,Kep, D, eta, KNEE, M):
BIG_LIST=[]
for i in range (0, M):
LIST_SNR=one_realization_only_pevatrons(a, Kep, D,eta, KNEE)
BIG_LIST.append(LIST_SNR)
return BIG_LIST
def print_one_SNR (SNR):
print ('----------------')
print (' ')
print ('pos_r =', SNR.pos_r)
print ('pos_theta=', SNR.pos_theta)
print ('pos_z =', SNR.pos_z)
print ('type =', SNR.type)
print ('age =', SNR.age)
print ('size =', SNR.size)
print ('Rshock =', SNR.Rshock_t(SNR.age))
print ('alpha =', SNR.alpha)
print ('Norm =', SNR.Norm_hadronic(SNR.age))
print ('Emax_proton =', SNR.Emax_t(SNR.age))
print ('Kep =', SNR.Kep)
print ('Estar =', SNR.Estar_electron_time (SNR.age))
print ('Emax_electron =', SNR.Emax_electron_vannoni_time (SNR.age))
print ('diff spectrum :')
for i in range (0,len(SNR.ENERGY)):
for t in range (0,len(SNR.TIME)):
print ( 'time= ', SNR.TIME[t], ' bin=', SNR.ENERGY[i], ' diff=', SNR.LGAMMA_DIFF_T[t][i])
print (' ')
def save_one_LIST_to_file (LIST,file):
with open(file, 'w') as text_file:
writer = csv.writer(text_file, delimiter='\t')
writer.writerow(["Num_SNR","pos_r", \
"pos_theta","pos_z","n0", \
"type", "age" ,"size", \
"Rsh","a","Norm",\
"Emax_proton","Kep","Estar", \
"Emax_electron","Time[kyr]","E[TeV]", \
"diff_spectrum[TeVcm-2s-1]"])
for i in range (0,len(LIST)):
ENERGY=np.array(LIST[i].ENERGY)
for j in range (0,len(LIST[i].ENERGY)):
# print('i/Len(LIST) =', i/len(LIST)*(100.) , '%')
##print( ' Norm = ', LIST[i].Norm_hadronic_mem, ' dist = ',LIST[i].dist )
writer.writerow((i,LIST[i].pos_r, \
LIST[i].pos_theta, LIST[i].pos_z,LIST[i].n0, \
LIST[i].type, LIST[i].age, LIST[i].size, \
LIST[i].Rshock,LIST[i].alpha, LIST[i].Norm_hadronic_mem, \
LIST[i].Emax_mem,LIST[i].Kep,LIST[i].Estar_electron_time_mem, \
LIST[i].Emax_electron_vannoni_time_mem ,LIST[i].age, ENERGY[j], \
LIST[i].LGAMMA_DIFF_T[len(LIST[i].TIME)-1][j]))
def get_first_nbr_from_str(input_str):
if not input_str or not isinstance(input_str, str):
return 0
out_number = ''
for ele in input_str:
if (ele == '.' and '.' not in out_number) or ele.isdigit():
out_number += ele
elif out_number:
break
return int(float(out_number)) if out_number else 0
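# Worked examples (traced from the logic above): the first run of digits/decimal point is
# collected and truncated to an int, e.g.
#   get_first_nbr_from_str('SNR 12.7kpc')  -> 12
#   get_first_nbr_from_str('age3kyr')      -> 3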
| 33.570975
| 368
| 0.608154
|
2d701408adb738669772c582b17bed3dd27b02a2
| 589
|
py
|
Python
|
AC/IntroPython/functions/h1.py
|
samirsaravia/ubiquitous-octo-fortnigh
|
c197945e7e849ddfece02e34e0c6b8f03e50c7dd
|
[
"MIT"
] | null | null | null |
AC/IntroPython/functions/h1.py
|
samirsaravia/ubiquitous-octo-fortnigh
|
c197945e7e849ddfece02e34e0c6b8f03e50c7dd
|
[
"MIT"
] | 1
|
2021-03-04T22:03:05.000Z
|
2021-03-04T22:03:05.000Z
|
AC/IntroPython/functions/h1.py
|
samirsaravia/ubiquitous-octo-fortnight
|
c197945e7e849ddfece02e34e0c6b8f03e50c7dd
|
[
"MIT"
] | null | null | null |
# creating functions (def)
# def diga_oi(nome='Mundo', comprimentar=None):
# if comprimentar is None:
# print(f'Olá {nome}!!!')
# else:
# print(f'{comprimentar}, {nome}!!')
#
#
# diga_oi('Zé mané') # with an argument, it overrides the default
# diga_oi() # without an argument, it runs the default
#
# diga_oi('pudim', comprimentar='Oi')
# diga_oi('pudim')
#
# print(type(None)) # it is the NoneType class, which does not exist as such but can be tested
def add_dois_numeros(x, y):
return x + y
add_dois_numeros(34, 2) # ignored (the return value is discarded)
resultado = add_dois_numeros(23432, 54672)
print(resultado)
| 22.653846
| 79
| 0.65365
|