Dataset schema (⌀ = nullable):

hexsha: string (length 40)
size: int64 (4 to 1.02M)
ext: string (8 classes)
lang: string (1 class)
max_stars_repo_path: string (length 4 to 209)
max_stars_repo_name: string (length 5 to 121)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 4 to 209)
max_issues_repo_name: string (length 5 to 121)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 4 to 209)
max_forks_repo_name: string (length 5 to 121)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 4 to 1.02M)
avg_line_length: float64 (1.07 to 66.1k)
max_line_length: int64 (4 to 266k)
alphanum_fraction: float64 (0.01 to 1)
hexsha: f302d1207e3d19d117a4ba482f9a70b75a5a7ec7 | size: 2,512 | ext: py | lang: Python
max_stars: repo_path=setup.py | repo_name=jamesgleave/JG_Mask_RCNN_Blueprint | head_hexsha=26edc2f931970a37ac2494991e26b8ff082b9dae | licenses=["MIT"] | count=1 | events: 2019-07-10T19:04:02.000Z to 2019-07-10T19:04:02.000Z
max_issues: repo_path=setup.py | repo_name=jamesgleave/JG_Mask_RCNN_Blueprint | head_hexsha=26edc2f931970a37ac2494991e26b8ff082b9dae | licenses=["MIT"] | count=null | events: null to null
max_forks: repo_path=setup.py | repo_name=jamesgleave/JG_Mask_RCNN_Blueprint | head_hexsha=26edc2f931970a37ac2494991e26b8ff082b9dae | licenses=["MIT"] | count=null | events: null to null
content:
"""
The build/compilations setup
pip install -r requirements.txt
python setup.py install
"""
import pip
import logging
import pkg_resources
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def _parse_requirements(file_path):
pip_ver = pkg_resources.get_distribution('pip').version
pip_version = list(map(int, pip_ver.split('.')[:2]))
if pip_version >= [6, 0]:
raw = pip.req.parse_requirements(file_path,
session=pip.download.PipSession())
else:
raw = pip.req.parse_requirements(file_path)
return [str(i.req) for i in raw]
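# A minimal fallback sketch (hypothetical, not part of the original project): `pip.req`
# and `pip.download` were removed in pip >= 10, so the helper above fails on modern pip.
# A dependency-free alternative simply reads requirements.txt directly; the function
# name below is illustrative.
def _parse_requirements_fallback(file_path):
    """Return requirement strings, skipping comments and blank lines."""
    with open(file_path) as f:
        return [line.strip() for line in f
                if line.strip() and not line.strip().startswith('#')]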
# parse_requirements() returns generator of pip.req.InstallRequirement objects
try:
install_reqs = _parse_requirements("requirements.txt")
except Exception:
    logging.warning('Failed to load requirements file, using default ones.')
install_reqs = []
setup(
name='mask-rcnn',
version='2.1',
url='https://github.com/matterport/Mask_RCNN',
author='Matterport',
author_email='waleed.abdulla@gmail.com',
license='MIT',
description='Mask R-CNN for object detection and instance segmentation',
packages=["mrcnn"],
install_requires=install_reqs,
include_package_data=True,
python_requires='>=3.4',
long_description="""This is an implementation of Mask R-CNN on Python 3, Keras, and TensorFlow.
The model generates bounding boxes and segmentation masks for each instance of an object in the image.
It's based on Feature Pyramid Network (FPN) and a ResNet101 backbone.""",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Scientific/Engineering :: Image Segmentation",
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords="image instance segmentation object detection mask rcnn r-cnn tensorflow keras",
)
avg_line_length: 36.405797 | max_line_length: 103 | alphanum_fraction: 0.677548

hexsha: 590f9f91062879dcb6199642d5527fff94de93d8 | size: 2,853 | ext: py | lang: Python
max_stars: repo_path=controller.py | repo_name=sbustars/STARS | head_hexsha=46a7475fa5d8afe9851581a17ad803b063525fbf | licenses=["MIT"] | count=9 | events: 2020-11-05T21:26:09.000Z to 2022-03-04T15:24:40.000Z
max_issues: repo_path=controller.py | repo_name=sbustars/STARS | head_hexsha=46a7475fa5d8afe9851581a17ad803b063525fbf | licenses=["MIT"] | count=7 | events: 2020-11-06T15:59:40.000Z to 2021-08-31T16:36:43.000Z
max_forks: repo_path=controller.py | repo_name=sbustars/STARS | head_hexsha=46a7475fa5d8afe9851581a17ad803b063525fbf | licenses=["MIT"] | count=1 | events: 2021-07-13T21:55:02.000Z to 2021-07-13T21:55:02.000Z
content:
from threading import Lock, Event
from interpreter.debugger import Debug
from interpreter.interpreter import Interpreter
import settings
'''
https://github.com/sbustars/STARS
Copyright 2020 Kevin McDonnell, Jihu Mun, and Ian Peitzsch
Developed by Kevin McDonnell (ktm@cs.stonybrook.edu),
Jihu Mun (jihu1011@gmail.com),
and Ian Peitzsch (irpeitzsch@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
class Controller():
def __init__(self, debug: Debug, interp: Interpreter):
self.debug = debug
self.interp = interp
def set_interp(self, interp: Interpreter) -> None:
self.interp = interp
self.debug = interp.debug
def set_pause(self, pause: bool) -> None:
if not pause:
self.interp.pause_lock.clear()
else:
self.interp.pause_lock.set()
def pause(self, pause: bool) -> None:
self.debug.continueFlag = not pause
if pause:
self.interp.pause_lock.clear()
else:
self.interp.pause_lock.set()
def get_byte(self, addr: int, signed: bool = False) -> int:
return self.interp.mem.getByte(addr, signed=signed, admin=True)
def add_breakpoint(self, cmd):
self.debug.addBreakpoint(cmd, self.interp)
def remove_breakpoint(self, cmd):
self.debug.removeBreakpoint(cmd, self.interp)
def reverse(self):
self.debug.reverse(None, self.interp)
def good(self) -> bool:
return self.interp is not None
def cont(self) -> bool:
return self.debug.continueFlag
def setSetting(self, key: str, val: bool) -> None:
settings.settings[key] = val
def get_labels(self):
return self.interp.mem.labels
def get_reg_word(self, reg):
return self.interp.get_reg_word(reg)
def get_instr_count(self):
return self.interp.instruction_count
avg_line_length: 38.554054 | max_line_length: 460 | alphanum_fraction: 0.711532

hexsha: 47091755fb6c463386dcdcd04708aa79a7e0996b | size: 4,159 | ext: py | lang: Python
max_stars: repo_path=_py2tmp/ir0_optimization/_optimization_execution.py | repo_name=google/tmppy | head_hexsha=faf67af1213ee709f28cc5f492ec4903c51d4104 | licenses=["Apache-2.0"] | count=27 | events: 2017-10-02T01:17:35.000Z to 2021-10-16T23:31:46.000Z
max_issues: repo_path=_py2tmp/ir0_optimization/_optimization_execution.py | repo_name=google/tmppy | head_hexsha=faf67af1213ee709f28cc5f492ec4903c51d4104 | licenses=["Apache-2.0"] | count=51 | events: 2017-10-01T09:38:22.000Z to 2018-10-13T16:39:39.000Z
max_forks: repo_path=_py2tmp/ir0_optimization/_optimization_execution.py | repo_name=google/tmppy | head_hexsha=faf67af1213ee709f28cc5f492ec4903c51d4104 | licenses=["Apache-2.0"] | count=9 | events: 2017-11-04T13:12:27.000Z to 2021-10-16T23:31:38.000Z
content:
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
from typing import Iterator, Callable, Tuple, List, Union
from _py2tmp.compiler.stages import header_to_cpp
from _py2tmp.ir0 import ir
from _py2tmp.ir0_optimization._configuration_knobs import ConfigurationKnobs
def apply_elem_optimization(elems: Tuple,
optimization: Callable[[], Tuple[Tuple, bool]],
describe_elems: Callable[[Tuple], str],
optimization_name: str,
other_context: Callable[[], str] = lambda: ''):
if ConfigurationKnobs.max_num_optimization_steps == 0:
return elems, False
ConfigurationKnobs.optimization_step_counter += 1
if ConfigurationKnobs.max_num_optimization_steps > 0:
ConfigurationKnobs.max_num_optimization_steps -= 1
new_elems, needs_another_loop = optimization()
if ConfigurationKnobs.verbose:
original_cpp = describe_elems(elems)
optimized_cpp = describe_elems(new_elems)
if original_cpp != optimized_cpp:
diff = ''.join(difflib.unified_diff(original_cpp.splitlines(True),
optimized_cpp.splitlines(True),
fromfile='before.h',
tofile='after.h'))
print('Original C++:\n' + original_cpp + '\n' + other_context()
+ 'After ' + optimization_name + ':\n' + optimized_cpp + '\n'
+ 'Diff:\n' + diff + '\n')
return new_elems, needs_another_loop
def describe_headers(headers: List[ir.Header],
identifier_generator: Iterator[str]):
return ''.join(header_to_cpp(header, identifier_generator, coverage_collection_enabled=False)
for header in headers)
def describe_template_defns(template_defns: Tuple[ir.TemplateDefn, ...], identifier_generator: Iterator[str]):
return header_to_cpp(ir.Header(template_defns=template_defns,
check_if_error_specializations=(),
toplevel_content=(),
public_names=frozenset(),
split_template_name_by_old_name_and_result_element_name=()),
identifier_generator,
coverage_collection_enabled=False)
def describe_toplevel_elems(toplevel_elems: Tuple[Union[ir.StaticAssert, ir.ConstantDef, ir.Typedef], ...],
identifier_generator: Iterator[str]):
return header_to_cpp(ir.Header(template_defns=(),
toplevel_content=toplevel_elems,
public_names=frozenset(),
split_template_name_by_old_name_and_result_element_name=(),
check_if_error_specializations=()),
identifier_generator,
coverage_collection_enabled=False)
def combine_optimizations(ir, optimizations):
needs_another_loop = False
for optimization in optimizations:
ir, needs_another_loop1 = optimization(ir)
needs_another_loop |= needs_another_loop1
return ir, needs_another_loop
def optimize_list(list, optimization):
needs_another_loop = False
result_list = []
for elem in list:
elem, needs_another_loop1 = optimization(elem)
needs_another_loop |= needs_another_loop1
result_list.append(elem)
return result_list, needs_another_loop
avg_line_length: 45.703297 | max_line_length: 110 | alphanum_fraction: 0.63044

hexsha: 0e959c6e8ecedae40842f2226b4eb93c2586a023 | size: 20,064 | ext: py | lang: Python
max_stars: repo_path=src/iemws/services/cow.py | repo_name=akrherz/iem-web-services | head_hexsha=e1e232b020d7b59eacd562b866e045044cbc4e46 | licenses=["Apache-2.0"] | count=null | events: null to null
max_issues: repo_path=src/iemws/services/cow.py | repo_name=akrherz/iem-web-services | head_hexsha=e1e232b020d7b59eacd562b866e045044cbc4e46 | licenses=["Apache-2.0"] | count=87 | events: 2018-03-20T12:57:59.000Z to 2022-03-30T18:54:36.000Z
max_forks: repo_path=src/iemws/services/cow.py | repo_name=akrherz/iem-web-services | head_hexsha=e1e232b020d7b59eacd562b866e045044cbc4e46 | licenses=["Apache-2.0"] | count=null | events: null to null
content:
"""IEM Cow (NWS Storm Based Warning Verification) API
See [IEM Cow](https://mesonet.agron.iastate.edu/cow/) webpage for the user
frontend to this API and for more discussion about what this does.
While this service only emits JSON, the JSON response embeds two GeoJSON objects
providing the storm reports and warnings.
Changed on 2 Sep 2021 to count LSRs valid at warning expiration time as
verifying as per NWS Verification Branch guidance.
"""
from typing import List
from datetime import datetime
import json
import geopandas as gpd
import pandas as pd
from pandas.io.sql import read_sql
from fastapi import Query, APIRouter
from shapely.ops import cascaded_union
from ..util import get_dbconn
ISO9660 = "%Y-%m-%dT%H:%M:%SZ"
LSRTYPE2PHENOM = {
"T": "TO",
"H": "SV",
"G": "SV",
"D": "SV",
"F": "FF",
"x": "FF",
"M": "MA",
"W": "MA",
"2": "DS",
}
router = APIRouter()
class COWSession(object):
"""Things that we could do while generating Cow stats"""
def __init__(
self,
wfo,
begints,
endts,
phenomena,
lsrtype,
hailsize,
lsrbuffer,
warningbuffer,
wind,
windhailtag,
limitwarns,
fcster,
):
"""Build out our session based on provided fields"""
self.wfo = wfo
# Figure out the begin and end times
self.begints, self.endts = begints, endts
# Storage of data
self.events = gpd.GeoDataFrame()
self.events_buffered = None
self.stormreports = gpd.GeoDataFrame()
self.stormreports_buffered = None
self.stats = dict()
# query parameters
self.phenomena = phenomena
if not self.phenomena:
self.phenomena = ["TO", "SV", "FF", "MA", "DS"]
self.lsrtype = lsrtype
if not self.lsrtype:
self.lsrtype = ["TO", "SV", "FF", "MA", "DS"]
self.hailsize = hailsize
self.lsrbuffer = lsrbuffer
self.warningbuffer = warningbuffer
self.wind = wind
self.windhailtag = windhailtag.upper() == "Y"
self.limitwarns = limitwarns.upper() == "Y"
self.fcster = fcster
# our database connection
self.dbconn = get_dbconn("postgis")
def milk(self):
"""Milk the Cow and see what happens"""
self.load_events()
self.load_stormreports()
self.compute_shared_border()
self.sbw_verify()
self.area_verify()
self.compute_stats()
def compute_stats(self):
"""Fill out the stats attribute"""
_ev = self.events
_sr = self.stormreports
self.stats["area_verify[%]"] = (
0
if _ev.empty
else _ev["areaverify"].sum() / _ev["parea"].sum() * 100.0
)
self.stats["shared_border[%]"] = (
0
if _ev.empty
else _ev["sharedborder"].sum() / _ev["perimeter"].sum() * 100.0
)
self.stats["max_leadtime[min]"] = (
None if _sr.empty else _sr["leadtime"].max()
)
self.stats["min_leadtime[min]"] = (
None if _sr.empty else _sr["leadtime"].min()
)
self.stats["avg_leadtime[min]"] = (
None if _sr.empty else _sr["leadtime"].mean()
)
self.stats["tdq_stormreports"] = (
0 if _sr.empty else len(_sr[_sr["tdq"]].index)
)
self.stats["unwarned_reports"] = (
0 if _sr.empty else len(_sr[~_sr["warned"]].index)
)
self.stats["warned_reports"] = (
0 if _sr.empty else len(_sr[_sr["warned"]].index)
)
self.stats["events_verified"] = (
0 if _ev.empty else len(_ev[_ev["verify"]].index)
)
self.stats["events_total"] = len(_ev.index)
self.stats["reports_total"] = len(_sr.index)
if self.stats["reports_total"] > 0:
pod = self.stats["warned_reports"] / float(
self.stats["reports_total"]
)
else:
pod = 0
self.stats["POD[1]"] = pod
if self.stats["events_total"] > 0:
far = (
self.stats["events_total"] - self.stats["events_verified"]
) / self.stats["events_total"]
else:
far = 0
self.stats["FAR[1]"] = far
if pod > 0:
self.stats["CSI[1]"] = (((pod) ** -1 + (1 - far) ** -1) - 1) ** -1
else:
self.stats["CSI[1]"] = 0.0
self.stats["avg_size[sq km]"] = 0 if _ev.empty else _ev["parea"].mean()
self.stats["size_poly_vs_county[%]"] = (
0 if _ev.empty else _ev["parea"].sum() / _ev["carea"].sum() * 100.0
)
# Prevent NaN values from above
for key in self.stats:
if pd.isnull(self.stats[key]):
self.stats[key] = None
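    # A small worked example of the scores above (illustrative only): CSI is computed
    # as 1 / (1/POD + 1/(1 - FAR) - 1), so POD = 0.8 and FAR = 0.25 give
    # CSI = 1 / (1.25 + 1.333... - 1) ~ 0.632. As a standalone sketch:
    #
    #   def csi(pod: float, far: float) -> float:
    #       return 0.0 if pod <= 0 else 1.0 / (1.0 / pod + 1.0 / (1.0 - far) - 1.0)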
def sql_lsr_limiter(self):
"""How to limit LSR types"""
ltypes = []
if "TO" in self.lsrtype:
ltypes.append("T")
if "SV" in self.lsrtype:
ltypes.extend(["H", "G", "D"])
if "FF" in self.lsrtype:
ltypes.extend(["F", "x"])
if "MA" in self.lsrtype:
ltypes.extend(["M", "W"])
if "DS" in self.lsrtype:
ltypes.append("2")
if len(ltypes) == 1:
return " and type = '%s'" % (ltypes[0],)
return " and type in %s " % (tuple(ltypes),)
def sql_fcster_limiter(self):
"""Should we limit the fcster column?"""
if self.fcster is None:
return " "
return " and fcster ILIKE '%s' " % (self.fcster,)
def sql_wfo_limiter(self):
"""get the SQL for how we limit WFOs"""
if "_ALL" in self.wfo or not self.wfo:
return " "
if len(self.wfo) == 1:
return " and w.wfo = '%s' " % (self.wfo[0],)
return " and w.wfo in %s " % (tuple(self.wfo),)
def sql_tag_limiter(self):
"""Do we need to limit the events based on tags"""
if not self.limitwarns:
return " "
return (
" and ((s.windtag >= %s or s.hailtag >= %s) or "
" (s.windtag is null and s.hailtag is null)) "
) % (self.wind, self.hailsize)
def load_events(self):
"""Build out the listing of events based on the request"""
self.events = gpd.read_postgis(
f"""
WITH stormbased as (
SELECT wfo, phenomena, eventid, hailtag, windtag,
geom, significance,
ST_area(ST_transform(geom,2163)) / 1000000.0 as parea,
ST_perimeter(ST_transform(geom,2163)) as perimeter,
ST_xmax(geom) as lon0, ST_ymax(geom) as lat0,
extract(year from issue at time zone 'UTC') as year
from sbw w WHERE status = 'NEW' {self.sql_wfo_limiter()}
and issue >= %s and issue < %s and expire < %s
and significance = 'W'
and phenomena in %s {self.sql_tag_limiter()}
),
countybased as (
SELECT w.wfo, phenomena, eventid, significance,
max(w.status) as statuses,
array_agg(u.ugc) as ar_ugc,
array_agg(u.name ||' '||u.state) as ar_ugcname,
sum(ST_area(ST_transform(u.geom,2163)) / 1000000.0) as carea,
min(issue at time zone 'UTC') as missue,
max(expire at time zone 'UTC') as mexpire,
extract(year from issue at time zone 'UTC') as year, w.fcster
from warnings w JOIN ugcs u on (u.gid = w.gid) WHERE
w.gid is not null {self.sql_wfo_limiter()} and
issue >= %s and issue < %s and expire < %s
and significance = 'W'
and phenomena in %s {self.sql_tag_limiter()}
{self.sql_fcster_limiter()}
GROUP by w.wfo, phenomena, eventid, significance, year, fcster
)
SELECT s.year::int, s.wfo, s.phenomena, s.eventid, s.geom,
c.missue as issue,
c.mexpire as expire, c.statuses, c.fcster,
s.significance, s.hailtag, s.windtag, c.carea, c.ar_ugc,
s.lat0, s.lon0, s.perimeter, s.parea, c.ar_ugcname,
s.year || s.wfo || s.eventid || s.phenomena || s.significance ||
row_number() OVER (PARTITION by s.year, s.wfo, s.eventid, s.phenomena,
s.significance ORDER by c.missue ASC) as key
from stormbased s JOIN countybased c on
(c.eventid = s.eventid and c.wfo = s.wfo and c.year = s.year
and c.phenomena = s.phenomena and c.significance = s.significance)
ORDER by issue ASC
""",
self.dbconn,
params=(
self.begints,
self.endts,
self.endts,
tuple(self.phenomena),
self.begints,
self.endts,
self.endts,
tuple(self.phenomena),
),
crs={"init": "epsg:4326"},
index_col="key",
)
self.events["stormreports"] = [
[] for _ in range(len(self.events.index))
]
self.events["verify"] = False
self.events["lead0"] = None
self.events["areaverify"] = 0
self.events["sharedborder"] = 0
if self.events.empty:
return
s2163 = self.events["geom"].to_crs(epsg=2163)
self.events_buffered = s2163.buffer(self.warningbuffer * 1000.0)
def load_stormreports(self):
"""Build out the listing of storm reports based on the request"""
self.stormreports = gpd.read_postgis(
f"""
SELECT distinct valid at time zone 'UTC' as valid,
type, magnitude, city, county, state,
source, remark, wfo, typetext, ST_x(geom) as lon0, ST_y(geom) as lat0,
geom
from lsrs w WHERE valid >= %s and valid <= %s
{self.sql_wfo_limiter()} {self.sql_lsr_limiter()}
and ((type = 'M' and magnitude >= 34) or type = '2' or
(type = 'H' and magnitude >= %s) or type = 'W' or
type = 'T' or (type = 'G' and magnitude >= %s) or type = 'D'
or type = 'F' or type = 'x') ORDER by valid ASC
""",
self.dbconn,
params=(self.begints, self.endts, self.hailsize, self.wind),
geom_col="geom",
crs={"init": "epsg:4326"},
)
self.stormreports["events"] = [
[] for _ in range(len(self.stormreports.index))
]
self.stormreports["tdq"] = False
self.stormreports["warned"] = False
self.stormreports["leadtime"] = None
self.stormreports["lsrtype"] = self.stormreports["type"].map(
LSRTYPE2PHENOM
)
if self.stormreports.empty:
return
s2163 = self.stormreports["geom"].to_crs(epsg=2163)
self.stormreports_buffered = s2163.buffer(self.lsrbuffer * 1000.0)
def compute_shared_border(self):
"""Compute a stat"""
# re ST_Buffer(simple_geom) see akrherz/iem#163
df = read_sql(
f"""
WITH stormbased as (
SELECT geom, wfo, eventid, phenomena, significance,
extract(year from issue at time zone 'UTC') as year
from sbw w WHERE status = 'NEW' {self.sql_wfo_limiter()}
and issue >= %s and issue < %s and expire < %s
and significance = 'W'
and phenomena in %s {self.sql_tag_limiter()}),
countybased as (
SELECT ST_Union(ST_Buffer(u.simple_geom, 0)) as geom,
w.wfo, phenomena, eventid, significance,
extract(year from issue at time zone 'UTC') as year, w.fcster
from warnings w JOIN ugcs u on (u.gid = w.gid) WHERE
w.gid is not null {self.sql_wfo_limiter()} and
issue >= %s and issue < %s and expire < %s
and significance = 'W'
and phenomena in %s {self.sql_tag_limiter()}
{self.sql_fcster_limiter()}
GROUP by w.wfo, phenomena, eventid, significance, year,
fcster),
agg as (
SELECT ST_SetSRID(ST_intersection(
ST_buffer(ST_exteriorring(
ST_geometryn(ST_multi(c.geom),1)),0.02),
ST_exteriorring(ST_geometryn(
ST_multi(s.geom),1))), 4326) as geo,
c.year, c.wfo, c.phenomena, c.significance, c.eventid
from stormbased s, countybased c WHERE
s.wfo = c.wfo and s.eventid = c.eventid and
s.phenomena = c.phenomena and s.significance = c.significance
and s.year = c.year
)
SELECT sum(ST_Length(ST_transform(geo,2163))) as s,
year || wfo || eventid || phenomena || significance as key
from agg GROUP by key
""",
self.dbconn,
params=(
self.begints,
self.endts,
self.endts,
tuple(self.phenomena),
self.begints,
self.endts,
self.endts,
tuple(self.phenomena),
),
index_col="key",
)
self.events["sharedborder"] = df["s"]
def sbw_verify(self):
"""Verify the events"""
if self.stormreports_buffered is None or self.events_buffered is None:
return
centroids = self.stormreports_buffered.centroid
for eidx, geometry in self.events_buffered.iteritems():
_ev = self.events.loc[eidx]
# Prevent dups?
if isinstance(_ev, pd.DataFrame):
_ev = _ev.iloc[0]
indicies = (self.stormreports["valid"] >= _ev["issue"]) & (
self.stormreports["valid"] <= _ev["expire"]
)
# NB the within operation returns a boolean series sometimes false
for sidx, isinside in (
centroids[indicies].within(geometry).iteritems()
):
if not isinside:
continue
_sr = self.stormreports.loc[sidx]
if _sr["events"]:
continue
verify = False
if _ev["phenomena"] == "FF" and _sr["type"] in ["F", "x"]:
verify = True
elif _ev["phenomena"] == "TO":
if _sr["type"] == "T":
verify = True
else:
self.stormreports.at[sidx, "tdq"] = True
elif _ev["phenomena"] == "DS":
if _sr["type"] == "2":
verify = True
elif _ev["phenomena"] == "MA" and _sr["type"] in [
"W",
"M",
"H",
]:
verify = True
elif _ev["phenomena"] == "SV" and _sr["type"] in [
"G",
"D",
"H",
]:
                    # If we are to verify based on the windhail tag, then we
# need to compare the magnitudes
if self.windhailtag:
if (
_sr["type"] == "H"
and _sr["magnitude"] >= _ev["hailtag"]
):
verify = True
elif (
_sr["type"] == "G"
and _sr["magnitude"] >= _ev["windtag"]
):
verify = True
elif _sr["type"] == "D": # can't tag verify these
verify = True
else:
verify = True
if not verify:
continue
self.events.at[eidx, "verify"] = True
self.stormreports.at[sidx, "warned"] = True
leadtime = int(
(_sr["valid"] - _ev["issue"]).total_seconds() / 60.0
)
if _sr["leadtime"] is None:
self.stormreports.at[sidx, "leadtime"] = leadtime
if not _ev["stormreports"]:
self.events.at[eidx, "lead0"] = leadtime
self.events.at[eidx, "stormreports"].append(sidx)
self.stormreports.at[sidx, "events"].append(eidx)
def area_verify(self):
"""Do Areal verification"""
if self.events_buffered is None:
return
e2163 = self.events.to_crs(epsg=2163)
for eidx, _ev in e2163.iterrows():
if not _ev["stormreports"]:
continue
# Union all the LSRs into one shape
lsrs = cascaded_union(
self.stormreports_buffered[_ev["stormreports"]]
)
# Intersect with this warning geometry to find overlap
overlap = _ev["geom"].buffer(0).intersection(lsrs)
self.events.loc[eidx, "areaverify"] = overlap.area / 1000000.0
def clean_dataframes(self):
"""Get rid of types we can not handle"""
for df in [self.events, self.stormreports]:
for colname in df.select_dtypes(
include=["datetime64[ns]"]
).columns:
df[colname] = df[colname].dt.strftime(ISO9660)
def _to_csv(val):
"""helper."""
return ",".join([str(s) for s in val])
# Convert hacky column of lists to csv
self.events["stormreports"] = self.events["stormreports"].apply(
_to_csv
)
self.stormreports["events"] = self.stormreports["events"].apply(
_to_csv
)
def handler(
wfo,
begints,
endts,
phenomena,
lsrtype,
hailsize,
lsrbuffer,
warningbuffer,
wind,
windhailtag,
limitwarns,
fcster,
):
"""Handle the request, return dict"""
cow = COWSession(
wfo,
begints,
endts,
phenomena,
lsrtype,
hailsize,
lsrbuffer,
warningbuffer,
wind,
windhailtag,
limitwarns,
fcster,
)
cow.milk()
# Some stuff is not JSON serializable
cow.clean_dataframes()
res = {
"generated_at": datetime.utcnow().strftime(ISO9660),
"params": {
"wfo": cow.wfo,
"phenomena": cow.phenomena,
"lsrtype": cow.lsrtype,
"hailsize": cow.hailsize,
"lsrbuffer": cow.lsrbuffer,
"wind": cow.wind,
"windhailtag": cow.windhailtag,
"limitwarns": cow.limitwarns,
"begints": cow.begints.strftime(ISO9660),
"endts": cow.endts.strftime(ISO9660),
"warningbuffer": cow.warningbuffer,
},
"stats": cow.stats,
"events": json.loads(cow.events.to_json()),
"stormreports": json.loads(cow.stormreports.to_json()),
}
# only include when the easter egg is enabled
if cow.fcster:
res["params"]["fcster"] = cow.fcster
return res
@router.get("/cow.json", description=__doc__)
def cow_service(
wfo: List[str] = Query(
[], min_length=3, max_length=4, title="WFO Identifiers"
),
begints: datetime = Query(...),
endts: datetime = Query(...),
phenomena: List[str] = Query(None, max_length=2),
lsrtype: List[str] = Query(None, max_length=2),
hailsize: float = Query(1),
lsrbuffer: float = Query(15),
warningbuffer: float = Query(1),
wind: float = Query(58),
windhailtag: str = Query("N"),
limitwarns: str = Query("N"),
fcster: str = None,
):
"""Replaced by __doc__."""
return handler(
wfo,
begints,
endts,
phenomena,
lsrtype,
hailsize,
lsrbuffer,
warningbuffer,
wind,
windhailtag,
limitwarns,
fcster,
)
cow_service.__doc__ = __doc__
avg_line_length: 35.637655 | max_line_length: 79 | alphanum_fraction: 0.515152

hexsha: 827731909f540ff076d023fd065bbd91b7f2a8c8 | size: 126 | ext: py | lang: Python
max_stars: repo_path=nmoo/plotting/__init__.py | repo_name=altaris/noisy-moo | head_hexsha=86fd5972c8bf9216357cca3abaa0268ff04b1cc0 | licenses=["MIT"] | count=1 | events: 2022-03-17T09:03:17.000Z to 2022-03-17T09:03:17.000Z
max_issues: repo_path=nmoo/plotting/__init__.py | repo_name=altaris/noisy-moo | head_hexsha=86fd5972c8bf9216357cca3abaa0268ff04b1cc0 | licenses=["MIT"] | count=5 | events: 2021-09-13T02:08:12.000Z to 2021-10-12T03:41:15.000Z
max_forks: repo_path=nmoo/plotting/__init__.py | repo_name=altaris/noisy-moo | head_hexsha=86fd5972c8bf9216357cca3abaa0268ff04b1cc0 | licenses=["MIT"] | count=null | events: null to null
content:
"""
Plotting
"""
from .delta_f import generate_delta_F_plots
from .performance_indicators import plot_performance_indicators
avg_line_length: 18 | max_line_length: 63 | alphanum_fraction: 0.84127

hexsha: 3682a56d52627e187957410613a6f36b5bb40380 | size: 2,880 | ext: py | lang: Python
max_stars: repo_path=tests/test_wavenet_2.py | repo_name=stankevich-mipt/pytorch-wavenet | head_hexsha=202641b242e1948df05658683aad6348f41c8f7f | licenses=["MIT"] | count=858 | events: 2017-04-20T13:18:15.000Z to 2022-03-23T21:07:51.000Z
max_issues: repo_path=tests/test_wavenet_2.py | repo_name=niyingqiu/pytorch-wavenet | head_hexsha=26ba28989edcf8688f6216057aafda07601ff07e | licenses=["MIT"] | count=41 | events: 2017-06-22T02:26:39.000Z to 2022-03-27T23:29:37.000Z
max_forks: repo_path=tests/test_wavenet_2.py | repo_name=niyingqiu/pytorch-wavenet | head_hexsha=26ba28989edcf8688f6216057aafda07601ff07e | licenses=["MIT"] | count=225 | events: 2017-06-21T06:32:03.000Z to 2022-03-27T23:28:00.000Z
content:
import time
from wavenet_model import WaveNetModel
from wavenet_training import *
from torch.autograd import Variable
import torch
import numpy as np
model = WaveNetModel(layers=7,
blocks=2,
dilation_channels=4,
residual_channels=4,
skip_channels=8,
classes=256,
output_length=8)
# out = model.forward(Variable(torch.zeros((1, 1, 2048))))
# print(out)
print("model: ", model)
print("scope: ", model.receptive_field)
# print("parameter count", model.parameter_count())
# data = WaveNetData('../train_samples/violin.wav',
# input_length=model.receptive_field,
# target_length=model.output_length,
# num_classes=model.classes)
# data = ["../train_samples/hihat.wav",
# "../train_samples/piano.wav",
# "../train_samples/saber.wav",
# "../train_samples/violin.wav",
# "../train_samples/sine.wav",
# "../train_samples/bach_full.wav",
# "../train_samples/sapiens.wav"]
#"/Users/vincentherrmann/Music/Mischa Maisky plays Bach Cello Suite No.1 in G (full).wav"]
data = ["../train_samples/piano.wav"]
data_loader = AudioFileLoader(data,
classes=model.classes,
receptive_field=model.receptive_field,
target_length=model.output_length,
dtype=model.dtype,
ltype=torch.LongTensor,
sampling_rate=44100)
# start_tensor = data.get_minibatch([0])[0].squeeze()
# start_tensor = data_loader.get_wavenet_minibatch(indices=[model.receptive_field],
# receptive_field=model.receptive_field,
# target_length=model.output_length)
optimizer = WaveNetOptimizer(model,
data=data_loader,
validation_segments=4,
examples_per_validation_segment=2,
report_interval=4,
validation_interval=64,
segments_per_chunk=4,
examples_per_segment=8)
# optimizer = Optimizer(model, learning_rate=0.001, mini_batch_size=4, avg_length=2)
# generated = model.generate_fast(100,
# first_samples=torch.zeros((1)),
# sampled_generation=True)
# print(generated)
print('start training...')
tic = time.time()
# optimizer.train(data, epochs=100)
optimizer.train(learning_rate=0.01,
minibatch_size=4,
epochs=10,
segments_per_chunk=4,
examples_per_segment=8)
toc = time.time()
print('Training took {} seconds.'.format(toc - tic))
generated = model.generate_fast(500)
print(generated)
avg_line_length: 35.121951 | max_line_length: 98 | alphanum_fraction: 0.573611

hexsha: 535fb8ad0fb626a9763778835615b0079802d66b | size: 2,895 | ext: py | lang: Python
max_stars: repo_path=region_proposal_network/loss.py | repo_name=fredfung007/snlt | head_hexsha=5ce66f9f79808d39341dd3dfbab6a2c78e1c0425 | licenses=["Apache-2.0"] | count=8 | events: 2021-04-08T12:48:13.000Z to 2022-03-30T03:28:03.000Z
max_issues: repo_path=region_proposal_network/loss.py | repo_name=fredfung007/snlt | head_hexsha=5ce66f9f79808d39341dd3dfbab6a2c78e1c0425 | licenses=["Apache-2.0"] | count=11 | events: 2021-05-21T08:56:16.000Z to 2022-03-15T01:58:21.000Z
max_forks: repo_path=region_proposal_network/loss.py | repo_name=fredfung007/snlt | head_hexsha=5ce66f9f79808d39341dd3dfbab6a2c78e1c0425 | licenses=["Apache-2.0"] | count=3 | events: 2021-06-21T17:14:41.000Z to 2021-11-30T12:34:34.000Z
content:
# COPYRIGHT 2021. Fred Fung. Boston University.
import torch
def get_cls_loss(pred, label, select):
if len(select.size()) == 0 or select.size() == torch.Size([0]):
return torch.tensor(0., requires_grad=False)
pred = torch.index_select(pred, 0, select)
label = torch.index_select(label, 0, select)
return torch.nn.functional.nll_loss(pred, label)
def select_cross_entropy_loss(pred, label, phrase_present=None):
if phrase_present is not None:
label = label.transpose(0, -1)
label = phrase_present * (1 + label) - 1
label = label.transpose(0, -1)
label = label.type(torch.int64)
pred = pred.view(-1, 2)
label = label.view(-1)
pos = torch.nonzero(label.data.eq(1), as_tuple=False).squeeze()
neg = torch.nonzero(label.data.eq(0), as_tuple=False).squeeze()
pos = pos.cuda()
neg = neg.cuda()
loss_pos = get_cls_loss(pred, label, pos)
loss_neg = get_cls_loss(pred, label, neg)
return loss_pos * 0.5 + loss_neg * 0.5
def weight_l1_loss(pred_loc, label_loc, loss_weight, phrase_present=None):
b, _, sh, sw = pred_loc.size()
pred_loc = pred_loc.view(b, 4, -1, sh, sw)
diff = (pred_loc - label_loc).abs()
if phrase_present is not None:
diff = diff.transpose(0, -1)
diff = phrase_present * diff
diff = diff.transpose(0, -1)
diff = diff.sum(dim=1).view(b, -1, sh, sw)
loss = diff * loss_weight
if phrase_present is not None:
count = torch.sum(phrase_present)
if not count.is_nonzero():
count += 1
else:
count = b
return loss.sum().div(count)
def triplet_loss(pred, label, phrase_present=None, margin=0.4):
if phrase_present is not None:
label = label.transpose(0, -1)
label = phrase_present * (1 + label) - 1
label = label.transpose(0, -1)
label = label.type(torch.int64)
batch_size = pred.shape[0]
pred = pred.view(batch_size, -1, 2)
score = torch.nn.functional.softmax(pred, dim=-1)[:, :, 1]
label = label.view(batch_size, -1)
pos = label.eq(1).nonzero(as_tuple=False).squeeze()
neg = label.eq(0).nonzero(as_tuple=False).squeeze()
pos = pos.cuda()
neg = neg.cuda()
if len(pos.size()) == 0 or pos.size() == torch.Size([0]) or len(neg.size()) == 0 or neg.size() == torch.Size([0]):
return torch.tensor(0., requires_grad=False)
pos_pred = torch.stack([score[batch, index] for batch, index in pos])
neg_pred = torch.stack([score[batch, index] for batch, index in neg])
pos_length = pos.size()[0]
neg_length = neg.size()[0]
pos_pred = pos_pred.repeat(neg_length).view(neg_length, pos_length)
neg_pred = neg_pred.repeat(pos_length).view(pos_length, neg_length).transpose(0, 1)
distance = neg_pred - pos_pred + margin
loss = torch.mean(torch.max(distance, torch.zeros_like(distance)))
return loss
avg_line_length: 39.121622 | max_line_length: 118 | alphanum_fraction: 0.641105

hexsha: d30eaa5806502d6cdf5c803c035d967dc9a6fad9 | size: 11,508 | ext: py | lang: Python
max_stars: repo_path=my_pybullet_envs/inmoov_hand.py | repo_name=jyf588/pytorch-rl-bullet | head_hexsha=3ac1835d01e658b2078126895ffa0eb11304abb4 | licenses=["MIT"] | count=null | events: null to null
max_issues: repo_path=my_pybullet_envs/inmoov_hand.py | repo_name=jyf588/pytorch-rl-bullet | head_hexsha=3ac1835d01e658b2078126895ffa0eb11304abb4 | licenses=["MIT"] | count=null | events: null to null
max_forks: repo_path=my_pybullet_envs/inmoov_hand.py | repo_name=jyf588/pytorch-rl-bullet | head_hexsha=3ac1835d01e658b2078126895ffa0eb11304abb4 | licenses=["MIT"] | count=null | events: null to null
content:
import pybullet as p
import time
import gym, gym.utils.seeding
import numpy as np
import math
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# TODO: mass good or not?
# TODO: render
class InmoovHand:
def __init__(self,
base_init_pos=np.array([0., 0, 0.1]),
base_init_euler=np.array([1.57, 0, 3.14]),
init_fin_pos=np.array([0.3, 0.0, 0.8] + [0.4] * 6 + [0.0, 0.4, 0.4, 0.4] * 2)): # TODO
self.baseInitPos = base_init_pos
self.baseInitOri = base_init_euler
self.initPos = init_fin_pos
# TODO: note, no self-collision flag
# TODO: inmoov thumb seems to have some flaws
self.handId = p.loadURDF(os.path.join(currentdir, "assets/inmoov_ros/inmoov_description/robots/inmoov_right_hand.urdf"),
list(self.baseInitPos), p.getQuaternionFromEuler(list(self.baseInitOri)))
nDof = p.getNumJoints(self.handId)
# for i in range(p.getNumJoints(self.handId)):
# print(p.getJointInfo(self.handId, i)[2], p.getJointInfo(self.handId, i)[8], p.getJointInfo(self.handId, i)[9])
# TODO: 13, 19 are strange ring DoF, the rest are fixed joints to be excluded
self.activeDofs = [1,2,3, 5,6,7, 9,10,11, 13,14,15,16, 19,20,21,22] # 17DOFs
self.ll = np.array([p.getJointInfo(self.handId, i)[8] for i in range(nDof)])
self.ul = np.array([p.getJointInfo(self.handId, i)[9] for i in range(nDof)]) # use np for multi indexing
self.ll = self.ll[self.activeDofs]
self.ul = self.ul[self.activeDofs]
for ind in range(len(self.activeDofs)):
p.resetJointState(self.handId, self.activeDofs[ind], self.initPos[ind], 0.0)
for i in range(-1, p.getNumJoints(self.handId)):
p.changeDynamics(self.handId, i, lateralFriction=3.0)
# TODO: decrease mass for now
mass = p.getDynamicsInfo(self.handId, i)[0]
# inertia = p.getDynamicsInfo(self.handId, i)[2]
mass = mass * 0.1
# inertia = [ele * 100. for ele in inertia]
p.changeDynamics(self.handId, i, mass=mass)
# p.changeDynamics(self.handId, i, localInertiaDiagnoal=inertia) # TODO: default inertia from bullet
# https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/constraint.py#L11
self.cid = p.createConstraint(self.handId, -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0, 0, 0],
childFramePosition=list(self.baseInitPos),
childFrameOrientation=p.getQuaternionFromEuler(list(self.baseInitOri)))
self.tarBasePos = np.copy(self.baseInitPos)
self.tarBaseOri = np.copy(self.baseInitOri) # euler angles
self.tarFingerPos = np.copy(self.initPos) # used for position control and as part of state
self.maxForce = 500.
self.include_redun_body_pos = False
self.np_random = None # seeding inited outside in Env
# print(self.tarFingerPos)
# print(self.ll)
# print(self.ul)
assert len(self.tarFingerPos) == len(self.ll)
def reset(self):
# TODO: bullet env reload urdfs in reset...
# TODO: bullet env reset pos with added noise but velocity to zero always.
goodInit = False
while not goodInit:
# initBasePos = self.baseInitPos
# initOri = self.baseInitOri
initBasePos = np.array(self.baseInitPos)
initBasePos[0] += self.np_random.uniform(low=-0.05, high=0.05)
initBasePos[1] += self.np_random.uniform(low=-0.05, high=0.05)
initBasePos[2] += self.np_random.uniform(low=-0.05, high=0.05) # enlarge here
initOri = np.array(self.baseInitOri) + self.np_random.uniform(low=-0.05, high=0.05, size=3)
initQuat = p.getQuaternionFromEuler(list(initOri))
#
# print(p.getEulerFromQuaternion(initQuat))
# TODO: added noise
# init self.np_random outside, in Env
# initPos = self.initPos
initPos = self.initPos + self.np_random.uniform(low=-0.1, high=0.1, size=len(self.initPos))
p.removeConstraint(self.cid)
p.resetBasePositionAndOrientation(self.handId, initBasePos, initQuat)
for ind in range(len(self.activeDofs)):
p.resetJointState(self.handId, self.activeDofs[ind], initPos[ind], 0.0)
self.cid = p.createConstraint(self.handId, -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0, 0, 0],
childFramePosition=initBasePos,
childFrameOrientation=initQuat)
p.stepSimulation() # TODO
cps = p.getContactPoints(bodyA=self.handId)
# for cp in cps:
# print(cp)
# input("penter")
# print(cps[0][6])
            if len(cps) == 0: goodInit = True # TODO: init hand last and make sure it does not collide with env
self.tarBasePos = np.copy(initBasePos)
self.tarBaseOri = np.copy(initOri)
self.tarFingerPos = np.copy(initPos)
def get_raw_state_fingers(self, includeVel=True):
joints_state = p.getJointStates(self.handId, self.activeDofs)
if includeVel:
joints_state = np.array(joints_state)[:,[0,1]]
else:
joints_state = np.array(joints_state)[:, [0]]
# print(joints_state.flatten())
return np.hstack(joints_state.flatten())
def get_robot_observation(self):
obs = []
basePos, baseQuat = p.getBasePositionAndOrientation(self.handId)
obs.extend(basePos)
obs.extend(baseQuat)
obs.extend(list(self.get_raw_state_fingers()))
# print(self.get_raw_state_fingers())
# TODO: no finger vel
baseVels = p.getBaseVelocity(self.handId)
obs.extend(baseVels[0])
obs.extend(baseVels[1])
obs.extend(list(self.tarFingerPos))
# print(self.tarFingerPos)
obs.extend(list(self.tarBasePos))
tarQuat = p.getQuaternionFromEuler(list(self.tarBaseOri))
obs.extend(tarQuat)
if self.include_redun_body_pos:
for i in range(p.getNumJoints(self.handId)):
pos = p.getLinkState(self.handId, i)[0] # [0] stores xyz position
obs.extend(pos)
return obs
def get_robot_observation_dim(self):
return len(self.get_robot_observation())
def get_finger_dist_from_init(self):
return np.linalg.norm(self.get_raw_state_fingers(includeVel=False) - self.initPos)
# def get_three_finger_deviation(self):
# fingers_q = self.get_raw_state_fingers(includeVel=False)
# assert len(fingers_q) == 16 # TODO
# f1 = fingers_q[:4]
# f2 = fingers_q[4:8]
# f3 = fingers_q[8:12]
# # TODO: is this different from dist to mean
# return np.linalg.norm(f1-f2) + np.linalg.norm(f2-f3) + np.linalg.norm(f1-f3)
def apply_action(self, a):
# print("action", a)
# TODO: should encourage same q for first 3 fingers for now
# TODO: a is already scaled, how much to scale? decide in Env.
# should be delta control (policy outputs delta position), but better add to old tar pos instead of cur pos
# TODO: but tar pos should be part of state vector (? how accurate is pos_con?)
a = np.array(a)
dxyz = a[0:3]
dOri = a[3:6]
self.tarBasePos += dxyz
self.tarBasePos[:2] = np.clip(self.tarBasePos[:2], -0.3, 0.3)
self.tarBasePos[2] = np.clip(self.tarBasePos[2], -0.05, 0.2) # so that it cannot go below obj and stop obj wo grasp
ori_lb = self.baseInitOri - 1.57 # TODO: is this right?
ori_ub = self.baseInitOri + 1.57
self.tarBaseOri += dOri
self.tarBaseOri = np.clip(self.tarBaseOri, ori_lb, ori_ub)
# print(self.tarBasePos)
# print(p.getBasePositionAndOrientation(self.handId))
tarQuat = p.getQuaternionFromEuler(list(self.tarBaseOri))
p.changeConstraint(self.cid, list(self.tarBasePos), tarQuat, maxForce=self.maxForce)
self.tarFingerPos += a[6:] # length should match
self.tarFingerPos = np.clip(self.tarFingerPos, self.ll, self.ul)
# p.setJointMotorControlArray(self.handId,
# self.activeDofs,
# p.POSITION_CONTROL,
# targetPositions=list(self.tarFingerPos))
# p.setJointMotorControlArray(self.handId,
# self.activeDofs,
# p.POSITION_CONTROL,
# targetPositions=list(self.tarFingerPos),
# forces=[self.maxForce]*len(self.tarFingerPos))
for i in range(len(self.activeDofs)):
p.setJointMotorControl2(self.handId,
jointIndex=self.activeDofs[i],
controlMode=p.POSITION_CONTROL,
targetPosition=self.tarFingerPos[i],
force=self.maxForce) # TODO: finger force now smaller
if __name__ == "__main__":
physicsClient = p.connect(p.GUI) #or p.DIRECT for non-graphical version
# p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
# p.setGravity(0,0,-10)
# planeId = p.loadURDF("plane.urdf")
# cubeStartPos = [0,0,1]
# cubeStartOrientation = p.getQuaternionFromEuler([0,0,0])
# # boxId = p.loadURDF("r2d2.urdf",cubeStartPos, cubeStartOrientation)
#
# boxId = p.loadURDF("/home/yifengj/Downloads/allegro_hand_description/allegro_hand_description_right.urdf", cubeStartPos,
# cubeStartOrientation)
#
# for i in range (1000000):
# p.stepSimulation()
# time.sleep(1./240.)
# cubePos, cubeOrn = p.getBasePositionAndOrientation(boxId)
# print(cubePos,cubeOrn)
p.setTimeStep(1./240)
# p.setGravity(0, 0, -10)
floorId = p.loadURDF(os.path.join(currentdir, 'assets/plane.urdf'), [0, 0, 0], useFixedBase=1)
p.changeDynamics(floorId, -1, lateralFriction=3.0)
a = InmoovHand()
a.np_random, seed = gym.utils.seeding.np_random(0)
for i in range(100):
np.random.seed(0)
a.reset()
# a = InmoovHand()
# p.resetSimulation()
# p.setTimeStep(1. / 240)
p.setGravity(0, 0, -10)
#
# p.loadURDF(os.path.join(currentdir, 'assets/plane.urdf'), [0, 0, 0], useFixedBase=1)
# a = AllegroHand()
input("press enter to continue")
print("init", a.get_robot_observation())
for t in range(400):
# a.apply_action(np.random.uniform(low=-0.02, high=0.02, size=6+16)-0.01)
a.apply_action(np.array([0.01]*23))
# a.apply_action(np.array([0] * 22))
p.stepSimulation()
time.sleep(1./240.)
print("final obz", a.get_robot_observation())
p.disconnect()
# def seed(self, seed=None):
# self.np_random, seed = gym.utils.seeding.np_random(seed)
# self.robot.np_random = self.np_random # use the same np_randomizer for robot as for env
# return [seed]
avg_line_length: 41.695652 | max_line_length: 128 | alphanum_fraction: 0.595846

hexsha: ea1b9010b7ef57cdf4fa6cdff02eff0315522a70 | size: 7,430 | ext: py | lang: Python
max_stars: repo_path=accelbyte_py_sdk/api/iam/operations/third_party_credential/delete_third_party_login_platform_credential_v3.py | repo_name=encyphered/accelbyte-python-sdk | head_hexsha=09c1e989d7251de308150fdcd3119d662ca2d205 | licenses=["MIT"] | count=null | events: null to null
max_issues: repo_path=accelbyte_py_sdk/api/iam/operations/third_party_credential/delete_third_party_login_platform_credential_v3.py | repo_name=encyphered/accelbyte-python-sdk | head_hexsha=09c1e989d7251de308150fdcd3119d662ca2d205 | licenses=["MIT"] | count=null | events: null to null
max_forks: repo_path=accelbyte_py_sdk/api/iam/operations/third_party_credential/delete_third_party_login_platform_credential_v3.py | repo_name=encyphered/accelbyte-python-sdk | head_hexsha=09c1e989d7251de308150fdcd3119d662ca2d205 | licenses=["MIT"] | count=null | events: null to null
content:
# Auto-generated at 2021-09-27T17:01:25.033996+08:00
# from: Justice Iam Service (4.1.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import RestErrorResponse
class DeleteThirdPartyLoginPlatformCredentialV3(Operation):
"""Delete Third Party Platform Credential (DeleteThirdPartyLoginPlatformCredentialV3)
Properties:
url: /iam/v3/admin/namespaces/{namespace}/platforms/{platformId}/clients
method: DELETE
tags: Third Party Credential
consumes: []
produces: ["application/json"]
security: bearer
namespace: (namespace) REQUIRED str in path
platform_id: (platformId) REQUIRED str in path
Responses:
204: No Content - (Delete Process Successful)
401: Unauthorized - (Unauthorized)
403: Forbidden - (Forbidden)
404: Not Found - RestErrorResponse (Third Party Credential Not Found)
500: Internal Server Error - RestErrorResponse (Internal Server Error)
"""
# region fields
_url: str = "/iam/v3/admin/namespaces/{namespace}/platforms/{platformId}/clients"
_method: str = "DELETE"
_consumes: List[str] = []
_produces: List[str] = ["application/json"]
_security: Optional[str] = "bearer"
_location_query: str = None
namespace: str # REQUIRED in [path]
platform_id: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def security(self) -> Optional[str]:
return self._security
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
def get_full_url(self, base_url: Union[None, str] = None) -> str:
result = base_url if base_url is not None else ""
# path params
url = self.url
for k, v in self.get_path_params().items():
url = url.replace(f"{{{k}}}", v)
result += url
return result
# noinspection PyMethodMayBeStatic
def get_all_required_fields(self) -> List[str]:
return [
"namespace",
"platform_id",
]
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"path": self.get_path_params(),
}
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "platform_id"):
result["platformId"] = self.platform_id
return result
# endregion get_x_params methods
# region is/has methods
def is_valid(self) -> bool:
if not hasattr(self, "namespace") or self.namespace is None:
return False
if not hasattr(self, "platform_id") or self.platform_id is None:
return False
return True
# endregion is/has methods
# region with_x methods
def with_namespace(self, value: str) -> DeleteThirdPartyLoginPlatformCredentialV3:
self.namespace = value
return self
def with_platform_id(self, value: str) -> DeleteThirdPartyLoginPlatformCredentialV3:
self.platform_id = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = str()
if hasattr(self, "platform_id") and self.platform_id:
result["platformId"] = str(self.platform_id)
elif include_empty:
result["platformId"] = str()
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, HttpResponse], Union[None, HttpResponse, RestErrorResponse]]:
"""Parse the given response.
204: No Content - (Delete Process Successful)
401: Unauthorized - (Unauthorized)
403: Forbidden - (Forbidden)
404: Not Found - RestErrorResponse (Third Party Credential Not Found)
500: Internal Server Error - RestErrorResponse (Internal Server Error)
"""
if code == 204:
return HttpResponse.create(code, "No Content"), None
if code == 401:
return None, HttpResponse.create(code, "Unauthorized")
if code == 403:
return None, HttpResponse.create(code, "Forbidden")
if code == 404:
return None, RestErrorResponse.create_from_dict(content)
if code == 500:
return None, RestErrorResponse.create_from_dict(content)
was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
if was_handled:
return None, undocumented_response
return None, HttpResponse.create_unhandled_error()
# endregion response methods
# region static methods
@classmethod
def create(
cls,
namespace: str,
platform_id: str,
) -> DeleteThirdPartyLoginPlatformCredentialV3:
instance = cls()
instance.namespace = namespace
instance.platform_id = platform_id
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> DeleteThirdPartyLoginPlatformCredentialV3:
instance = cls()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = str()
if "platformId" in dict_ and dict_["platformId"] is not None:
instance.platform_id = str(dict_["platformId"])
elif include_empty:
instance.platform_id = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"namespace": "namespace",
"platformId": "platform_id",
}
# endregion static methods
avg_line_length: 29.72 | max_line_length: 155 | alphanum_fraction: 0.63284

hexsha: 1d8208138ae29283edf6aa19be2702e84c749447 | size: 19,970 | ext: py | lang: Python
max_stars: repo_path=syft/serde/msgpack/serde.py | repo_name=Prtfw/PySyft | head_hexsha=82eede90acf09f26de389237524897e2b142f9ff | licenses=["Apache-2.0"] | count=null | events: null to null
max_issues: repo_path=syft/serde/msgpack/serde.py | repo_name=Prtfw/PySyft | head_hexsha=82eede90acf09f26de389237524897e2b142f9ff | licenses=["Apache-2.0"] | count=null | events: null to null
max_forks: repo_path=syft/serde/msgpack/serde.py | repo_name=Prtfw/PySyft | head_hexsha=82eede90acf09f26de389237524897e2b142f9ff | licenses=["Apache-2.0"] | count=null | events: null to null
content:
"""
This file exists to provide one common place for all msgpack serialization to occur.
As msgpack only supports basic types and binary formats, every type must first be
converted to one of these types. Thus, we've split our functionality into three steps.
When converting from a PySyft object (or collection of objects) to an object to be
sent over the wire (a message), those three steps are (in order):
1. Simplify - converts PyTorch objects to simple Python objects (using pickle)
2. Serialize - converts Python objects to binary
3. Compress - compresses the binary (Now we're ready send!)
Inversely, when converting from a message sent over the wire back to a PySyft
object, the three steps are (in order):
1. Decompress - converts compressed binary back to decompressed binary
2. Deserialize - converts from binary to basic python objects
3. Detail - converts some basic python objects back to PyTorch objects (Tensors)
Furthermore, note that there is different simplification/serialization logic
for objects of different types. Thus, instead of using if/else logic, we have
global dictionaries which contain functions and Python types as keys. For
simplification logic, this dictionary is called "simplifiers". The keys
are the types and values are the simplification logic. For example,
simplifiers[tuple] will return the function which knows how to simplify the
tuple type. The same is true for all other simplifier/detailer functions.
By default, the simplification/detail operations expect Torch tensors. If the setup requires other
serialization process, it can override the functions _serialize_tensor and _deserialize_tensor
By default, we serialize using msgpack and compress using lz4.
If different compressions are required, the worker can override the function apply_compress_scheme
"""
from collections import OrderedDict
from typing import Callable
import inspect
import msgpack as msgpack_lib
import syft
from syft import dependency_check
from syft.federated.train_config import TrainConfig
from syft.frameworks.torch.tensors.decorators.logging import LoggingTensor
from syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor
from syft.frameworks.torch.tensors.interpreters.private import PrivateTensor
from syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor
from syft.frameworks.torch.tensors.interpreters.crt_precision import CRTPrecisionTensor
from syft.frameworks.torch.tensors.interpreters.autograd import AutogradTensor
from syft.execution.placeholder import PlaceHolder
from syft.execution.placeholder_id import PlaceholderId
from syft.execution.role import Role
from syft.generic.pointers.multi_pointer import MultiPointerTensor
from syft.generic.pointers.object_pointer import ObjectPointer
from syft.generic.pointers.pointer_tensor import PointerTensor
from syft.generic.pointers.pointer_plan import PointerPlan
from syft.generic.pointers.pointer_protocol import PointerProtocol
from syft.generic.pointers.object_wrapper import ObjectWrapper
from syft.generic.string import String
from syft.execution.plan import Plan
from syft.execution.state import State
from syft.execution.computation import ComputationAction
from syft.execution.communication import CommunicationAction
from syft.execution.protocol import Protocol
from syft.messaging.message import TensorCommandMessage
from syft.messaging.message import ObjectMessage
from syft.messaging.message import ObjectRequestMessage
from syft.messaging.message import IsNoneMessage
from syft.messaging.message import GetShapeMessage
from syft.messaging.message import ForceObjectDeleteMessage
from syft.messaging.message import SearchMessage
from syft.messaging.message import PlanCommandMessage
from syft.messaging.message import WorkerCommandMessage
from syft.serde import compression
from syft.serde.msgpack.native_serde import MAP_NATIVE_SIMPLIFIERS_AND_DETAILERS
from syft.workers.abstract import AbstractWorker
from syft.workers.base import BaseWorker
from syft.frameworks.torch.fl import BaseDataset
from syft.generic.pointers.pointer_dataset import PointerDataset
from syft.exceptions import GetNotPermittedError
from syft.exceptions import ResponseSignatureError
from syft.frameworks.torch.tensors.interpreters.gradients_core import GradFunc
if dependency_check.torch_available:
from syft.serde.msgpack.torch_serde import MAP_TORCH_SIMPLIFIERS_AND_DETAILERS
else:
MAP_TORCH_SIMPLIFIERS_AND_DETAILERS = {}
if dependency_check.tensorflow_available:
from syft_tensorflow.serde import MAP_TF_SIMPLIFIERS_AND_DETAILERS
else:
MAP_TF_SIMPLIFIERS_AND_DETAILERS = {}
from syft.serde.msgpack.proto import proto_type_info
# Maps a type to a tuple containing its simplifier and detailer function
# NOTE: serialization constants for these objects need to be defined in `proto.json` file
# in https://github.com/OpenMined/proto
MAP_TO_SIMPLIFIERS_AND_DETAILERS = OrderedDict(
list(MAP_NATIVE_SIMPLIFIERS_AND_DETAILERS.items())
+ list(MAP_TORCH_SIMPLIFIERS_AND_DETAILERS.items())
+ list(MAP_TF_SIMPLIFIERS_AND_DETAILERS.items())
)
# If an object implements its own simplify and detail functions it should be stored in this list
# NOTE: serialization constants for these objects need to be defined in `proto.json` file
# in https://github.com/OpenMined/proto
OBJ_SIMPLIFIER_AND_DETAILERS = [
AdditiveSharingTensor,
FixedPrecisionTensor,
PrivateTensor,
CRTPrecisionTensor,
LoggingTensor,
MultiPointerTensor,
PlaceHolder,
PlaceholderId,
Role,
ObjectPointer,
Plan,
State,
ComputationAction,
CommunicationAction,
Protocol,
PointerTensor,
PointerPlan,
PointerProtocol,
ObjectWrapper,
TrainConfig,
BaseWorker,
AutogradTensor,
TensorCommandMessage,
ObjectMessage,
ObjectRequestMessage,
IsNoneMessage,
GetShapeMessage,
ForceObjectDeleteMessage,
SearchMessage,
PlanCommandMessage,
WorkerCommandMessage,
GradFunc,
String,
BaseDataset,
PointerDataset,
]
# If an object implements its own force_simplify and force_detail functions it should be stored in this list
# NOTE: serialization constants for these objects need to be defined in `proto.json` file
# in https://github.com/OpenMined/proto
OBJ_FORCE_FULL_SIMPLIFIER_AND_DETAILERS = [BaseWorker]
# For registering syft objects with custom simplify and detail methods
# NOTE: serialization constants for these objects need to be defined in `proto.json` file
# in https://github.com/OpenMined/proto
EXCEPTION_SIMPLIFIER_AND_DETAILERS = [GetNotPermittedError, ResponseSignatureError]
## SECTION: High Level Simplification Router
def _force_full_simplify(worker: AbstractWorker, obj: object) -> object:
"""To force a full simplify generally if the usual _simplify is not suitable.
If we can not full simplify a object we simplify it as usual instead.
Args:
obj: The object.
Returns:
The simplified object.
"""
# check to see if there is a full simplifier
# for this type. If there is, return the full simplified object.
current_type = type(obj)
if current_type in forced_full_simplifiers:
result = (
forced_full_simplifiers[current_type][0],
forced_full_simplifiers[current_type][1](worker, obj),
)
return result
# If we already tried to find a full simplifier for this type but failed, we should
# simplify it instead.
elif current_type in no_full_simplifiers_found:
return _simplify(worker, obj)
else:
# If the object type is not in forced_full_simplifiers,
# we check the classes that this object inherits from.
        # `inspect.getmro` gives us all types this object inherits
# from, including `type(obj)`. We can skip the type of the
# object because we already tried this in the
# previous step.
classes_inheritance = inspect.getmro(type(obj))[1:]
for inheritance_type in classes_inheritance:
if inheritance_type in forced_full_simplifiers:
# Store the inheritance_type in forced_full_simplifiers so next
# time we see this type serde will be faster.
forced_full_simplifiers[current_type] = forced_full_simplifiers[inheritance_type]
result = (
forced_full_simplifiers[current_type][0],
forced_full_simplifiers[current_type][1](worker, obj),
)
return result
# If there is not a full_simplifier for this
# object, then we simplify it.
no_full_simplifiers_found.add(current_type)
return _simplify(worker, obj)
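# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical helper showing the MRO fallback above in action:
# BaseWorker is registered in OBJ_FORCE_FULL_SIMPLIFIER_AND_DETAILERS, so a
# concrete subclass instance passed in as `some_worker` (e.g. a VirtualWorker)
# is resolved through inspect.getmro() and then cached in
# forced_full_simplifiers for later calls.
def _example_force_full_fallback(worker: AbstractWorker, some_worker: BaseWorker) -> object:
    return _force_full_simplify(worker, some_worker)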
## SECTION: dynamically generate simplifiers and detailers
def _generate_simplifiers_and_detailers():
"""Generate simplifiers, forced full simplifiers and detailers,
by registering native and torch types, syft objects with custom
simplify and detail methods, or syft objects with custom
force_simplify and force_detail methods.
    NOTE: this function uses `proto_type_info`, which translates a Python class into the Serde constant defined in
    https://github.com/OpenMined/proto. If a class used in `MAP_TO_SIMPLIFIERS_AND_DETAILERS`,
    `OBJ_SIMPLIFIER_AND_DETAILERS`, `EXCEPTION_SIMPLIFIER_AND_DETAILERS` or `OBJ_FORCE_FULL_SIMPLIFIER_AND_DETAILERS`
    is not defined in the `proto.json` file in https://github.com/OpenMined/proto, this function will raise an error.
Returns:
The simplifiers, forced_full_simplifiers, detailers
"""
simplifiers = OrderedDict()
forced_full_simplifiers = OrderedDict()
detailers = OrderedDict()
def _add_simplifier_and_detailer(curr_type, simplifier, detailer, forced=False):
type_info = proto_type_info(curr_type)
if forced:
forced_full_simplifiers[curr_type] = (type_info.forced_code, simplifier)
detailers[type_info.forced_code] = detailer
else:
simplifiers[curr_type] = (type_info.code, simplifier)
detailers[type_info.code] = detailer
# Register native and torch types
for curr_type in MAP_TO_SIMPLIFIERS_AND_DETAILERS:
simplifier, detailer = MAP_TO_SIMPLIFIERS_AND_DETAILERS[curr_type]
_add_simplifier_and_detailer(curr_type, simplifier, detailer)
# Register syft objects with custom simplify and detail methods
for syft_type in OBJ_SIMPLIFIER_AND_DETAILERS + EXCEPTION_SIMPLIFIER_AND_DETAILERS:
simplifier, detailer = syft_type.simplify, syft_type.detail
_add_simplifier_and_detailer(syft_type, simplifier, detailer)
# Register syft objects with custom force_simplify and force_detail methods
for syft_type in OBJ_FORCE_FULL_SIMPLIFIER_AND_DETAILERS:
force_simplifier, force_detailer = syft_type.force_simplify, syft_type.force_detail
_add_simplifier_and_detailer(syft_type, force_simplifier, force_detailer, forced=True)
return simplifiers, forced_full_simplifiers, detailers
simplifiers, forced_full_simplifiers, detailers = _generate_simplifiers_and_detailers()
# Store types that are not simplifiable (int, float, None) so we
# can ignore them during serialization.
no_simplifiers_found, no_full_simplifiers_found = set(), set()
# Store types that use simplifiers from their ancestors so we
# can look them up quickly during serialization.
inherited_simplifiers_found = OrderedDict()
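# --- Illustrative sketch (not part of the original module) ---
# Hypothetical helper showing the shape of the registries built above:
# `simplifiers` maps a type to a (msgpack code, simplify function) pair and
# `detailers` maps that code back to the matching detail function.
def _example_registry_entry(registered_type: type):
    code, simplify_fn = simplifiers[registered_type]
    detail_fn = detailers[code]
    return code, simplify_fn, detail_fn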
def _serialize_msgpack_simple(
obj: object,
worker: AbstractWorker = None,
simplified: bool = False,
force_full_simplification: bool = False,
) -> bin:
if worker is None:
# TODO[jvmancuso]: This might be worth a standalone function.
worker = syft.framework.hook.local_worker
# 1) Simplify
    # simplify difficult-to-serialize objects. See the _simplify method
# for details on how this works. The general purpose is to handle types
# which the fast serializer cannot handle
if not simplified:
if force_full_simplification:
simple_objects = _force_full_simplify(worker, obj)
else:
simple_objects = _simplify(worker, obj)
else:
simple_objects = obj
return simple_objects
def _serialize_msgpack_binary(
simple_objects: object,
worker: AbstractWorker = None,
simplified: bool = False,
force_full_simplification: bool = False,
) -> bin:
# 2) Serialize
# serialize into a binary
binary = msgpack_lib.dumps(simple_objects)
# 3) Compress
# compress the binary and return the result
# prepend a 1-byte header '0' or '1' to the output stream
# to denote whether output stream is compressed or not
# if compressed stream length is greater than input stream
# we output the input stream as it is with header set to '0'
# otherwise we output the compressed stream with header set to '1'
# even if compressed flag is set to false by the caller we
# output the input stream as it is with header set to '0'
return compression._compress(binary)
def serialize(
obj: object,
worker: AbstractWorker = None,
simplified: bool = False,
force_full_simplification: bool = False,
) -> bin:
"""This method can serialize any object PySyft needs to send or store.
This is the high level function for serializing any object or collection
of objects which PySyft needs to send over the wire. It includes three
steps, Simplify, Serialize, and Compress as described inline below.
Args:
obj (object): the object to be serialized
        simplified (bool): in some cases we want to pass in data which has
            already been simplified, in which case we must skip simplifying
            it a second time.
force_full_simplification (bool): Some objects are only partially serialized
by default. For objects where this is the case, setting this flag to True
will force the entire object to be serialized. For example, setting this
flag to True will cause a VirtualWorker to be serialized WITH all of its
tensors while by default VirtualWorker objects only serialize a small
amount of metadata.
Returns:
binary: the serialized form of the object.
"""
if worker is None:
# TODO[jvmancuso]: This might be worth a standalone function.
worker = syft.framework.hook.local_worker
simple_objects = _serialize_msgpack_simple(obj, worker, simplified, force_full_simplification)
return _serialize_msgpack_binary(simple_objects)
def _deserialize_msgpack_binary(binary: bin, worker: AbstractWorker = None) -> object:
if worker is None:
# TODO[jvmancuso]: This might be worth a standalone function.
worker = syft.framework.hook.local_worker
# 1) Decompress the binary if needed
binary = compression._decompress(binary)
# 2) Deserialize
# This function converts the binary into the appropriate python
# object (or nested dict/collection of python objects)
simple_objects = msgpack_lib.loads(binary, use_list=False)
# sometimes we want to skip detailing (such as in Plan)
return simple_objects
def _deserialize_msgpack_simple(simple_objects: object, worker: AbstractWorker = None) -> object:
if worker is None:
# TODO[jvmancuso]: This might be worth a standalone function.
worker = syft.framework.hook.local_worker
# 3) Detail
    # This function converts typed, simple objects into their more
    # complex (and difficult to serialize) counterparts which the
    # serialization library wasn't natively able to serialize (such
    # as msgpack's inability to serialize torch tensors or ... or
    # python slice objects).
return _detail(worker, simple_objects)
def deserialize(binary: bin, worker: AbstractWorker = None) -> object:
if worker is None:
# TODO[jvmancuso]: This might be worth a standalone function.
worker = syft.framework.hook.local_worker
simple_objects = _deserialize_msgpack_binary(binary, worker)
return _deserialize_msgpack_simple(simple_objects, worker)
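# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical round trip through the msgpack serde layer defined above. It
# assumes a hook has already been created so that syft.framework.hook.local_worker
# exists; the payload is plain Python data purely for illustration (collection
# types may be normalised on the way back, e.g. lists can come back as tuples).
def _example_roundtrip():
    payload = {"tag": "demo", "values": (1, 2, 3)}
    binary = serialize(payload)      # simplify -> msgpack dump -> compress
    restored = deserialize(binary)   # decompress -> msgpack load -> detail
    return payload, restored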
def _simplify(worker: AbstractWorker, obj: object, **kwargs) -> object:
"""
This function takes an object as input and returns a simple
Python object which is supported by the chosen serialization
method (such as JSON or msgpack). The reason we have this function
is that some objects are either NOT supported by high level (fast)
serializers OR the high level serializers don't support the fastest
form of serialization. For example, PyTorch tensors have custom pickle
    functionality, thus it's better to pre-serialize PyTorch tensors using
    pickle and then serialize the binary along with the rest of the message
being sent.
Args:
obj: An object which may need to be simplified.
Returns:
        A simple Python object which msgpack can serialize.
"""
# Check to see if there is a simplifier
# for this type. If there is, return the simplified object.
# breakpoint()
current_type = type(obj)
# print(current_type, current_type in simplifiers)
if current_type in simplifiers:
result = (simplifiers[current_type][0], simplifiers[current_type][1](worker, obj, **kwargs))
return result
elif current_type in inherited_simplifiers_found:
result = (
inherited_simplifiers_found[current_type][0],
inherited_simplifiers_found[current_type][1](worker, obj, **kwargs),
)
return result
# If we already tried to find a simplifier for this type but failed, we should
# just return the object as it is.
elif current_type in no_simplifiers_found:
return obj
else:
# If the object type is not in simplifiers,
# we check the classes that this object inherits from.
        # `inspect.getmro` gives us all types this object inherits
# from, including `type(obj)`. We can skip the type of the
# object because we already tried this in the
# previous step.
classes_inheritance = inspect.getmro(type(obj))[1:]
for inheritance_type in classes_inheritance:
if inheritance_type in simplifiers:
# Store the inheritance_type in simplifiers so next time we see this type
# serde will be faster.
inherited_simplifiers_found[current_type] = simplifiers[inheritance_type]
result = (
inherited_simplifiers_found[current_type][0],
inherited_simplifiers_found[current_type][1](worker, obj, **kwargs),
)
return result
# if there is not a simplifier for this
# object, then the object is already a
# simple python object and we can just
# return it.
no_simplifiers_found.add(current_type)
return obj
def _detail(worker: AbstractWorker, obj: object, **kwargs) -> object:
"""Reverses the functionality of _simplify.
Where applicable, it converts simple objects into more complex objects such
as converting binary objects into torch tensors. Read _simplify for more
information on why _simplify and detail are needed.
Args:
worker: the worker which is acquiring the message content, for example
            used to specify the owner of a tensor received (not obvious for
virtual workers).
obj: a simple Python object which msgpack deserialized.
Returns:
obj: a more complex Python object which msgpack would have had trouble
deserializing directly.
"""
if type(obj) in (list, tuple):
return detailers[obj[0]](worker, obj[1], **kwargs)
else:
return obj
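# --- Illustrative sketch (not part of the original module) ---
# Hypothetical helper pairing the two routines above: _simplify returns a
# (type code, simplified payload) tuple for registered types and passes
# already-simple values through untouched, while _detail reverses that mapping.
def _example_simplify_then_detail(worker: AbstractWorker, obj: object) -> object:
    simple_form = _simplify(worker, obj)
    return _detail(worker, simple_form)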
| 42.042105
| 115
| 0.743165
|
cb29ed3138880ac06db6f930645029ba7c690240
| 1,708
|
py
|
Python
|
ortools/algorithms/samples/simple_knapsack_program.py
|
sreesubbash/or-tools
|
701496e45d54fa9938afeedec43089314d93ec11
|
[
"Apache-2.0"
] | 1
|
2021-03-30T21:10:27.000Z
|
2021-03-30T21:10:27.000Z
|
ortools/algorithms/samples/simple_knapsack_program.py
|
sreesubbash/or-tools
|
701496e45d54fa9938afeedec43089314d93ec11
|
[
"Apache-2.0"
] | null | null | null |
ortools/algorithms/samples/simple_knapsack_program.py
|
sreesubbash/or-tools
|
701496e45d54fa9938afeedec43089314d93ec11
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START program]
"""A simple knapsack problem."""
# [START import]
from ortools.algorithms import pywrapknapsack_solver
# [END import]
def main():
# Create the solver.
# [START solver]
solver = pywrapknapsack_solver.KnapsackSolver(
pywrapknapsack_solver.KnapsackSolver.
KNAPSACK_DYNAMIC_PROGRAMMING_SOLVER, "test")
# [END solver]
# [START data]
weights = [[
565, 406, 194, 130, 435, 367, 230, 315, 393, 125, 670, 892, 600, 293,
712, 147, 421, 255
]]
capacities = [850]
values = weights[0]
# [END data]
# [START solve]
solver.Init(values, weights, capacities)
computed_value = solver.Solve()
# [END solve]
# [START print_solution]
packed_items = [
x for x in range(0, len(weights[0])) if solver.BestSolutionContains(x)
]
packed_weights = [weights[0][i] for i in packed_items]
print("Packed items: ", packed_items)
print("Packed weights: ", packed_weights)
print("Total weight (same as total value): ", computed_value)
# [END print_solution]
if __name__ == "__main__":
main()
# [END program]
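# --- Illustrative sketch (not part of the original sample) ---
# Hypothetical sanity check: because `values = weights[0]` above, the objective
# value returned by Solve() equals the total packed weight, and that total must
# not exceed the single capacity of 850.
def check_solution(computed_value, packed_weights, capacity=850):
    assert computed_value == sum(packed_weights)
    assert sum(packed_weights) <= capacity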
| 29.964912
| 78
| 0.679742
|
ec69e07cb196cebe6fe64b2390c4fcdb0376ced1
| 5,602
|
py
|
Python
|
eve_module/user_auth/text.py
|
alentoghostflame/StupidEveAppraisalBot
|
a78adb206efd3a4dc41cbbfb45ee0b8b61aea248
|
[
"MIT"
] | 1
|
2021-12-12T02:50:20.000Z
|
2021-12-12T02:50:20.000Z
|
eve_module/user_auth/text.py
|
alentoghostflame/StupidEveAppraisalBot
|
a78adb206efd3a4dc41cbbfb45ee0b8b61aea248
|
[
"MIT"
] | 17
|
2020-02-07T23:40:36.000Z
|
2020-12-22T16:38:44.000Z
|
eve_module/user_auth/text.py
|
alentoghostflame/StupidEveAppraisalBot
|
a78adb206efd3a4dc41cbbfb45ee0b8b61aea248
|
[
"MIT"
] | null | null | null |
EVE_AUTH_CONTROL_BRIEF = "EVE authorization manager."
EVE_AUTH_CONTROL_COMMAND_NOT_FOUND = "Command not found, do `auth` to list all the commands."
EVE_AUTH_CONTROL_MISSING_ARG_1 = "`auth list|create|delete|select|info|update|token`"
EVE_AUTH_CONTROL_HELP_DESCRIPTION = "Manages the level of authorization the bot has for your character(s). Most " \
"information about your account requires special authorization to access, and " \
"this command is how you give the bot access to that information. Use `auth` to " \
"list this embed."
EVE_AUTH_CONTROL_HELP_AUTHORIZING = "`auth create`: Creates a minimal authorization profile for your character of " \
"choice. Follow the instructions listed by posting that command for more " \
"information on how to do that.\n" \
"`auth delete character_id`: Replace `character_id` with the ID of the character " \
"you no longer want the bot to have authorization for.\n" \
"`auth update`: A different part of the bot may request you to do this command " \
"update your character authorization with more or less permissions. Note, this " \
"command can be used to register new characters with permissions already set.\n" \
"`auth update force`: Same as `auth update` but forces the link even if you " \
"already have all the permissions.\n" \
"`auth token`: Used to redeem an authorization token URL."
EVE_AUTH_CONTROL_HELP_UTILITY = "`auth select character_id`: Replace `character_id` with the ID of a character you " \
"wish to have all auth-requiring commands use.\n" \
"`auth gat`: Get Access Token (GAT), used for development purposes or people wanting " \
"to explore what they can do."
EVE_AUTH_CONTROL_HELP_INFORMATION = "`auth list`: Lists all the characters that the bot is authorized with along " \
"with IDs and what character you currently have selected.\n" \
"`auth list character_id`: Replace `character_id` with the ID of a character you " \
"wish to view more detailed permission-related info of."
EVE_AUTH_CONTROL_SELECT_MISSING_ARG = "`auth select character_id` selects a character for the rest of the bot to " \
"use. Replace `character_id` with the ID of the character listed in the " \
"command `auth list`"
EVE_AUTH_CONTROL_CREATE_TEXT = "<{}>\nClick that link, and authorize with the character you want to add. After " \
"authorization, you will be redirected to a page that can't be reached. Copy the url " \
"and run the command `auth token insert_link_here`, replacing `insert_link_here` with " \
"the URL you copied."
EVE_AUTH_CONTROL_DELETE_MISSING_ARG = "`auth delete character_id` removes a character from the bot. Replace " \
"`character_id` with the ID of the character to be deleted, found by running " \
"the command `auth list`"
EVE_AUTH_CONTROL_DELETE_SUCCESS = "Successfully deleted the character."
EVE_AUTH_CONTROL_SELECT_SUCCESS = "Selected character set to \"{}\""
EVE_AUTH_CONTROL_INFO_MISSING_ARG = "`auth list character_id` lists permission-related info about a character. " \
"Replace `character_id` with the ID of the character listed in the command " \
"`auth list`"
EVE_AUTH_CONTROL_UPDATE_SUCCESS = "<{}>\nClick on that link and authorize with the character you want to update " \
"permissions for. After authorization, you will be redirected to a page that can't " \
"be reached. Copy the URL and run the command `auth token insert_link_here`, " \
"replacing `insert_link_here` with the URL you copied.\n The link above can be " \
"used to update multiple characters, not just the currently selected one."
EVE_AUTH_CONTROL_UPDATE_CURRENT_DESIRED_EQUAL = "All desired scopes for the selected character seem to be already " \
"met. If you want to force the auth URL anyway, run `auth update force`"
EVE_AUTH_CONTROL_UPDATE_ARG_NOT_FORCE = "Did you mean `auth update force` ?"
EVE_AUTH_CONTROL_REGISTER_TOKEN_MISSING_ARG = "`auth token insert_link_here` registers a character for use by this " \
"bot. For use of this command, see `auth create`"
EVE_AUTH_CONTROL_REGISTER_TOKEN_INVALID = "Given URL is invalid."
EVE_AUTH_CONTROL_REGISTER_SUCCESS = "Token registered. Welcome, {}."
EVE_AUTH_CONTROL_CONTEXT_HAS_GUILD = "For account security reasons, this command only works in DMs."
CHARACTER_ID_NOT_FOUND = "ID not found, character IDs are listed in the command `auth list`"
NO_AUTH_SELECTED_CHARACTER = "It doesn't appear you have a selected character, do `auth select` to find out more."
| 86.184615
| 120
| 0.605677
|
2199f5f861efd8b173c92f5480f1161bb36b4e6e
| 214
|
py
|
Python
|
src/api/api.py
|
mariadb-developers/places-app-python
|
ead39db0a269a098ae4e0d700d05e3e87688ec89
|
[
"MIT"
] | 8
|
2020-08-19T00:12:59.000Z
|
2021-09-17T21:06:06.000Z
|
src/api/api.py
|
mariadb-developers/places-app-python
|
ead39db0a269a098ae4e0d700d05e3e87688ec89
|
[
"MIT"
] | 170
|
2022-01-09T07:56:20.000Z
|
2022-03-02T12:03:11.000Z
|
api/python/api.py
|
isabella232/dev-example-places
|
c75ebf8b50f1b0c9f852266b80d9951a9b3955cb
|
[
"MIT"
] | 8
|
2020-05-17T08:36:39.000Z
|
2022-02-01T08:26:32.000Z
|
import flask
from locations import locations
app = flask.Flask(__name__)
app.config["DEBUG"] = True
app.register_blueprint(locations)
@app.route("/api/version")
def version():
return "1.0"
app.run(port=8080)
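# --- Illustrative sketch (not part of the original file) ---
# Hypothetical client helper; it assumes the app above is already running on
# the default host with the port configured in app.run().
def example_check_version():
    import requests  # third-party dependency, used only for this sketch
    return requests.get("http://127.0.0.1:8080/api/version").text  # expected: "1.0"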
| 17.833333
| 33
| 0.738318
|
37808bcf8d751d300a99371cbfeaa29134c60fd2
| 2,391
|
py
|
Python
|
tests/unit/core/test_platform_detection.py
|
tony/scout_apm_python
|
f477b09b1ef6e644980130d4d44954f27570ada2
|
[
"MIT"
] | 60
|
2018-04-15T04:09:39.000Z
|
2022-03-29T12:10:40.000Z
|
tests/unit/core/test_platform_detection.py
|
tony/scout_apm_python
|
f477b09b1ef6e644980130d4d44954f27570ada2
|
[
"MIT"
] | 326
|
2018-03-28T16:09:13.000Z
|
2022-03-03T13:50:23.000Z
|
tests/unit/core/test_platform_detection.py
|
tony/scout_apm_python
|
f477b09b1ef6e644980130d4d44954f27570ada2
|
[
"MIT"
] | 25
|
2018-05-30T17:59:46.000Z
|
2022-02-24T19:40:02.000Z
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from scout_apm.compat import string_type
from scout_apm.core import platform_detection
from tests.compat import mock
def test_get_triple():
triple = platform_detection.get_triple()
assert isinstance(triple, string_type)
assert platform_detection.is_valid_triple(triple)
@pytest.mark.parametrize(
"machine, system, result",
[
("x86_64", "Darwin", "x86_64-apple-darwin"),
("aarch64", "Darwin", "x86_64-apple-darwin"),
("x86_64", "Linux", "x86_64-unknown-linux-musl"),
("aarch64", "Linux", "aarch64-unknown-linux-musl"),
],
)
@mock.patch("platform.machine")
@mock.patch("platform.system")
def test_aarch64_apple_darwin_override(
platform_system, platform_machine, machine, system, result
):
platform_machine.return_value = machine
platform_system.return_value = system
assert platform_detection.get_triple() == result
@pytest.mark.parametrize(
"triple, validity",
[
("x86_64-apple-darwin", True),
("i686-unknown-linux-gnu", True),
("aarch64-unknown-linux-gnu", True),
("aarch64-unknown-linux-musl", True),
("x86_64-apple-darwin", True),
("unknown-unknown-linux-musl", True),
("", False),
("unknown", False),
("---", False),
("i686-apple-darwin", True),
("aarch64-apple-darwin", False),
],
)
def test_is_valid_triple(triple, validity):
assert platform_detection.is_valid_triple(triple) == validity
@pytest.mark.parametrize(
"machine, arch",
[
("i686", "i686"),
("x86_64", "x86_64"),
("aarch64", "aarch64"),
("i386", "unknown"),
("arm", "unknown"),
("", "unknown"),
],
)
@mock.patch("platform.machine")
def test_get_arch(platform_machine, machine, arch):
platform_machine.return_value = machine
assert platform_detection.get_arch() == arch
@pytest.mark.parametrize(
"system, platform",
[
("Darwin", "apple-darwin"),
("Linux", "unknown-linux-musl"),
("Windows", "unknown"),
("", "unknown"),
],
)
@mock.patch("platform.system")
def test_get_platform(platform_system, system, platform):
platform_system.return_value = system
assert platform_detection.get_platform() == platform
| 27.482759
| 82
| 0.646173
|
e18cb93c701594169e1d8e2f935343cdbbf7b7c2
| 7,189
|
py
|
Python
|
CodeToComputeAveragedHoldoutPerformances/main.py
|
fab464654/AnalisiDatiScienzeBiomediche_project
|
5c385667d14d5ea3b9cf9afcfa068841f3e3580c
|
[
"MIT"
] | null | null | null |
CodeToComputeAveragedHoldoutPerformances/main.py
|
fab464654/AnalisiDatiScienzeBiomediche_project
|
5c385667d14d5ea3b9cf9afcfa068841f3e3580c
|
[
"MIT"
] | null | null | null |
CodeToComputeAveragedHoldoutPerformances/main.py
|
fab464654/AnalisiDatiScienzeBiomediche_project
|
5c385667d14d5ea3b9cf9afcfa068841f3e3580c
|
[
"MIT"
] | null | null | null |
#Import Python libraries
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import seaborn as sns
#Import my scripts
from randomForest import my_random_forest, visualize_evaluation_metrics_RF, exai_rf
from svm import my_support_vector_machine, visualize_evaluation_metrics_SVC, exai_svm
from utility import histogram_features_and_labels, plot_accuracies_train_val_test, plot_performances
savingPath = "images/" #set the output plots path
#Import the dataset from 'processed.cleveland.data' file
featureNames = ['age', 'sex', 'chest pain type', 'resting blood pressure', 'serum cholesterol', 'fasting blood sugar',
'resting electrocardiographic results', 'maximum heart rate achieved', 'exercise induced angina',
'ST depression induced by exercise relative to rest', 'slope of the peak exercise ST segment',
'number of major vessels colored by flourosopy', 'thalassemia']
classNames = ["NO Heart disease", "Heart disease"]
datasetCols = featureNames + ['goal']
dataset = pd.read_csv('processed.cleveland.data', header=None, names=datasetCols) #no header present
#---- If some features have to be removed: -----#
#dataset = dataset.drop(labels=['thalassemia', 'number of major vessels colored by flourosopy', 'chest pain type'], axis=1)
#featureNames.remove('thalassemia')
#featureNames.remove('number of major vessels colored by flourosopy')
#featureNames.remove('chest pain type')
print(featureNames)
print(dataset.head())  #to have a preview of the dataframe
#Retrieve true labels: according to the specs, if 'goal' is greater than 0, a heart disease is present, so the
# classification labels are simplified as True=Heart Disease; False=NO Heart Disease
labels = np.array(dataset['goal'] > 0)
#print(np.count_nonzero(labels), len(labels)-np.count_nonzero(labels)) #to check the class balance (139 vs 164)
#---------------------------------------------------------------------
# Retrieve data (as string, because of the needed "data cleaning"!) |
#---------------------------------------------------------------------
data = np.array(dataset.iloc[:, 0:len(featureNames)]).astype(str)
print(data.shape) #to check
#There are 6 '?' fields inside the dataset. A proposed solution to avoid removing that row is to substitute that random
#character with the most common value of that category
indices = np.where(np.char.find(data, "?") == 0)
#print(indices) #to check
for k in range(indices[0].size):
row = indices[0][k]
col = indices[1][k]
#print(row, col) #to check
unique, pos = np.unique(data[:, col], return_inverse=True) #to find all unique elements and their positions
counts = np.bincount(pos) #to count the number of each unique element
maxpos = counts.argmax() #to find the positions of the maximum count
#print("Before: ", data[row, col]) #to check
data[row, col] = unique[maxpos]
#print("After: ", data[row, col]) #to check
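# --- Illustrative alternative (not part of the original script) ---
# The same most-frequent-value imputation can also be expressed with
# scikit-learn's SimpleImputer; this helper is a hypothetical sketch and is
# not called anywhere in this script.
def impute_question_marks(raw_data):
    from sklearn.impute import SimpleImputer
    imputer = SimpleImputer(missing_values="?", strategy="most_frequent")
    return imputer.fit_transform(np.asarray(raw_data, dtype=object))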
#-----------------------------
#Data scaling and transform |
#-----------------------------
scaler = MinMaxScaler() #StandardScaler()
new_data = scaler.fit_transform(data).astype(float)
numberOfPartitions = 20
#Use np.zeros (not np.empty) so the running sums below start from 0
meanACC_SVM = np.zeros([1,3]) #accT, accV, accuracyTEST
meanPREC_SVM = np.zeros([1,3]) #...
meanSENS_SVM = np.zeros([1,3]) #...
meanSPEC_SVM = np.zeros([1,3]) #...
meanACC_RF = np.zeros([1,3]) #accT, accV, accuracyTEST
meanPREC_RF = np.zeros([1,3]) #...
meanSENS_RF = np.zeros([1,3]) #...
meanSPEC_RF = np.zeros([1,3]) #...
for i in range(numberOfPartitions):
print("[SVM] Considering partition...", i)
print("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-**-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*")
#--------------------------------------------------
# RANDOMLY Split data and labels into train/test |
#--------------------------------------------------
trainData, testData, trainLabels, testLabels = train_test_split(new_data, labels, test_size=0.2, shuffle=True)
#To check the number of samples: shuffle=False because of the difference in performances
#print(np.count_nonzero(testLabels), len(testLabels)-np.count_nonzero(testLabels), np.count_nonzero(trainLabels), len(trainLabels)-np.count_nonzero(trainLabels))
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* Support Vector Machine *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-#
#----------------------------------------------------------------------------------------
# Call the SVM script, get best model, metrics and test predictions of the tuned model |
# ---------------------------------------------------------------------------------------
svc_best, bestC, bestKernel, ACC, PREC, SENS, SPEC, predictions, all_accT, all_accV = my_support_vector_machine(trainData, trainLabels, testData, testLabels)
#Store current values to then compute the mean
meanACC_SVM += np.array(ACC)
meanPREC_SVM += np.array(PREC)
meanSENS_SVM += np.array(SENS)
meanSPEC_SVM += np.array(SPEC)
print("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-**-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*")
print("[RF] Considering partition...", i)
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* Random Forest -*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-#
#--------------------------------------------------------------------------------------------------
# Call the Random Forest script, get best model, metrics and test predictions of the tuned model |
#--------------------------------------------------------------------------------------------------
rf_best, bestMaxDepth, bestMaxFeatures, bestMaxSamples, ACC, PREC, SENS, SPEC, predictions, all_accT, all_accV = my_random_forest(trainData, trainLabels, testData, testLabels)
#Store current values to then compute the mean
meanACC_RF += np.array(ACC)
meanPREC_RF += np.array(PREC)
meanSENS_RF += np.array(SENS)
meanSPEC_RF += np.array(SPEC)
print(meanACC_RF)
print("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-**-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*")
#Compute the mean performances over the "numberOfPartitions" iterations
meanACC_SVM = meanACC_SVM / numberOfPartitions
meanPREC_SVM = meanPREC_SVM / numberOfPartitions
meanSENS_SVM = meanSENS_SVM / numberOfPartitions
meanSPEC_SVM = meanSPEC_SVM / numberOfPartitions
meanACC_RF = meanACC_RF / numberOfPartitions
meanPREC_RF = meanPREC_RF / numberOfPartitions
meanSENS_RF = meanSENS_RF / numberOfPartitions
meanSPEC_RF = meanSPEC_RF / numberOfPartitions
# ---------------------
# Performances plots |
# ---------------------
plot_performances(testLabels, meanACC_SVM[0].tolist(), meanPREC_SVM[0].tolist(), meanSENS_SVM[0].tolist(), meanSPEC_SVM[0].tolist(), savingPath, "PERFORMANCES_SVM_best.jpg")
plot_performances(testLabels, meanACC_RF[0].tolist(), meanPREC_RF[0].tolist(), meanSENS_RF[0].tolist(), meanSPEC_RF[0].tolist(), savingPath, "PERFORMANCES_RF_best.jpg")
| 47.609272
| 180
| 0.601474
|
9fea1c06e9b514bffec79d3239c50683a8948054
| 1,547
|
py
|
Python
|
src/app/views/api/urls.py
|
deadlock-delegate/arkdelegates
|
8a5262f51b519ba3bc10094756c8866fc550df65
|
[
"MIT"
] | 2
|
2018-05-22T13:47:09.000Z
|
2018-05-23T12:45:05.000Z
|
src/app/views/api/urls.py
|
deadlock-delegate/arkdelegates
|
8a5262f51b519ba3bc10094756c8866fc550df65
|
[
"MIT"
] | 21
|
2018-05-08T12:56:46.000Z
|
2020-06-05T18:59:38.000Z
|
src/app/views/api/urls.py
|
deadlock-delegate/arkdelegates
|
8a5262f51b519ba3bc10094756c8866fc550df65
|
[
"MIT"
] | 4
|
2018-05-04T15:00:59.000Z
|
2019-02-13T02:39:07.000Z
|
from django.urls import path, re_path
from knox import views as knox_views
from app.views.api.auth import ClaimDelegate, LoginView
from app.views.api.contributions import Contributions
from app.views.api.delegates import Delegates
from app.views.api.news import News
urlpatterns = [
path("delegates/", Delegates.as_view(), name="api-delegates"),
re_path(
r"delegates/(?P<wallet_address>[0-9A-Za-z]{34})/$",
Delegates.as_view(),
name="api-delegate-a",
),
path("delegates/<slug:delegate_slug>/", Delegates.as_view(), name="api-delegate-b"),
# contributions
path(
"contributions/<str:contribution_id>/", Contributions.as_view(), name="api-contributions-id"
),
path(
"contributions/<slug:delegate_slug>/",
Contributions.as_view(),
name="api-contributions-slug",
),
path("contributions/", Contributions.as_view(), name="api-contributions"), # old
    # news
path("news/<str:news_id>/", News.as_view(), name="api-news-id"),
path("news/<slug:delegate_slug>/", News.as_view(), name="api-news-slug"),
path("news/", News.as_view(), name="api-news"), # old
# auth
path("auth/login/", LoginView.as_view(), name="api-auth-login"),
path("auth/logout/", knox_views.LogoutView.as_view(), name="knox_logout"),
path("auth/logoutall/", knox_views.LogoutAllView.as_view(), name="knox_logoutall"),
path(
"auth/claim-delegate/<slug:delegate_slug>/",
ClaimDelegate.as_view(),
name="api-claim-delegate",
),
]
| 36.833333
| 100
| 0.659987
|
2a5828b642ee9f08f20f6972202bf13d65d637bd
| 2,343
|
py
|
Python
|
python/orgdown.py
|
BaseCase/orgdown
|
eb4d610d8c1b2b2abb58bd96d18cc22723ae8a91
|
[
"MIT"
] | 3
|
2015-03-08T02:41:12.000Z
|
2021-04-27T18:37:14.000Z
|
python/orgdown.py
|
BaseCase/orgdown
|
eb4d610d8c1b2b2abb58bd96d18cc22723ae8a91
|
[
"MIT"
] | 3
|
2015-03-16T23:51:42.000Z
|
2015-03-26T19:37:08.000Z
|
python/orgdown.py
|
BaseCase/orgdown
|
eb4d610d8c1b2b2abb58bd96d18cc22723ae8a91
|
[
"MIT"
] | null | null | null |
import vim
def jump_to_next_heading():
current_row = vim.current.window.cursor[0]
next_row_with_heading = _find_next_heading_row(current_row)
if next_row_with_heading > -1:
vim.current.window.cursor = (next_row_with_heading, 0)
def jump_to_previous_heading():
current_row = vim.current.window.cursor[0]
prev_row_with_heading = _find_previous_heading_row(current_row)
if prev_row_with_heading > -1:
vim.current.window.cursor = (prev_row_with_heading, 0)
def cycle_visibility_local():
current_row = vim.current.window.cursor[0]
if not _is_heading_row(current_row): return
# it's a closed fold; open it
if _is_closed_fold(current_row):
vim.command("foldopen")
# it's open; fold down to next heading
else:
next_header_row = _next_heading_row_of_equal_or_lesser_depth(current_row)
if next_header_row > -1:
vim.command("{0},{1}fold".format(current_row, next_header_row - 1))
else:
end = len(vim.current.buffer)
vim.command("{0},{1}fold".format(current_row, end))
def _is_closed_fold(row_num):
print("checking row {}".format(row_num))
res = int(vim.eval("foldclosed({})".format(row_num)))
return res > -1
def _next_heading_row_of_equal_or_lesser_depth(row_num):
depth = _header_depth_of_row(row_num)
rows_below = vim.current.buffer[row_num:]
for index, row in enumerate(rows_below):
if row.startswith('#'):
this_row_num = index + row_num + 1
this_depth = _header_depth_of_row(this_row_num)
if this_depth <= depth:
return this_row_num
return -1
def _is_heading_row(row_num):
row = vim.current.buffer[row_num-1]
return row.startswith('#')
def _header_depth_of_row(row_num):
row = vim.current.buffer[row_num-1]
return row.find(' ')
def _find_next_heading_row(start_from):
rows_below = vim.current.buffer[start_from:]
for index, row in enumerate(rows_below):
if row.startswith('#'):
return index + start_from + 1
return -1
def _find_previous_heading_row(start_from):
rows_above = reversed(vim.current.buffer[0 : start_from - 1])
for index, row in enumerate(rows_above):
if row.startswith('#'):
return start_from - index - 1
return -1
| 30.038462
| 81
| 0.677764
|
13f05221e32a409af9da418e8aacc5c7c27fe96c
| 2,697
|
py
|
Python
|
focal_loss_with_smoothing.py
|
Kageshimasu/focal-loss-with-smoothing
|
23492cf2b6575a945f68f95179ef5d6288c63a76
|
[
"MIT"
] | 3
|
2021-03-11T07:55:15.000Z
|
2021-03-25T02:54:46.000Z
|
focal_loss_with_smoothing.py
|
Kageshimasu/focal-loss-with-smoothing
|
23492cf2b6575a945f68f95179ef5d6288c63a76
|
[
"MIT"
] | null | null | null |
focal_loss_with_smoothing.py
|
Kageshimasu/focal-loss-with-smoothing
|
23492cf2b6575a945f68f95179ef5d6288c63a76
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLossWithSmoothing(nn.Module):
def __init__(
self,
num_classes: int,
gamma: int = 1,
lb_smooth: float = 0.1,
size_average: bool = True,
ignore_index: int = None,
alpha: float = None):
"""
        :param num_classes: number of classes in the classification problem
        :param gamma: focusing parameter of the focal term (0 disables focusing)
        :param lb_smooth: label-smoothing factor
        :param size_average: stored for API compatibility; forward() always
            returns the mean loss
        :param ignore_index: label value to be ignored when computing the loss
        :param alpha: optional balancing factor in (0, 1); stored but not used
            in the computation shown here
"""
super(FocalLossWithSmoothing, self).__init__()
self._num_classes = num_classes
self._gamma = gamma
self._lb_smooth = lb_smooth
self._size_average = size_average
self._ignore_index = ignore_index
self._log_softmax = nn.LogSoftmax(dim=1)
self._alpha = alpha
if self._num_classes <= 1:
raise ValueError('The number of classes must be 2 or higher')
if self._gamma < 0:
raise ValueError('Gamma must be 0 or higher')
if self._alpha is not None:
if self._alpha <= 0 or self._alpha >= 1:
raise ValueError('Alpha must be 0 <= alpha <= 1')
def forward(self, logits, label):
"""
:param logits: (batch_size, class, height, width)
        :param label: (batch_size, height, width) tensor of integer class indices
        :return: scalar loss averaged over all positions in the batch
"""
logits = logits.float()
difficulty_level = self._estimate_difficulty_level(logits, label)
with torch.no_grad():
label = label.clone().detach()
if self._ignore_index is not None:
ignore = label.eq(self._ignore_index)
label[ignore] = 0
lb_pos, lb_neg = 1. - self._lb_smooth, self._lb_smooth / (self._num_classes - 1)
lb_one_hot = torch.empty_like(logits).fill_(
lb_neg).scatter_(1, label.unsqueeze(1), lb_pos).detach()
logs = self._log_softmax(logits)
loss = -torch.sum(difficulty_level * logs * lb_one_hot, dim=1)
if self._ignore_index is not None:
loss[ignore] = 0
return loss.mean()
def _estimate_difficulty_level(self, logits, label):
"""
        :param logits: raw network outputs, (batch_size, class, height, width)
        :param label: integer class indices for each position
        :return: focal modulation term (1 - p_t) ** gamma, same shape as logits
"""
one_hot_key = torch.nn.functional.one_hot(label, num_classes=self._num_classes)
if len(one_hot_key.shape) == 4:
one_hot_key = one_hot_key.permute(0, 3, 1, 2)
if one_hot_key.device != logits.device:
one_hot_key = one_hot_key.to(logits.device)
        pt = one_hot_key * F.softmax(logits, dim=1)
difficulty_level = torch.pow(1 - pt, self._gamma)
return difficulty_level
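# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical shapes: a batch of 4 three-class segmentation maps over an 8x8
# grid, matching the (batch_size, class, height, width) layout documented in
# forward(); the label tensor holds integer class indices per position.
if __name__ == "__main__":
    criterion = FocalLossWithSmoothing(num_classes=3, gamma=2, lb_smooth=0.1)
    example_logits = torch.randn(4, 3, 8, 8)
    example_labels = torch.randint(0, 3, (4, 8, 8))
    print(criterion(example_logits, example_labels).item())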
| 34.139241
| 92
| 0.577679
|
ceaf7bafa098354055b996c0cacacc6c831469f7
| 717
|
py
|
Python
|
IOPool/Input/test/preMerge2_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
IOPool/Input/test/preMerge2_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
IOPool/Input/test/preMerge2_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TESTOUTPUT")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.Thing = cms.EDProducer("ThingProducer")
process.OtherThing = cms.EDProducer("OtherThingProducer",
useRefs = cms.untracked.bool(True)
)
process.output = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring(
'keep *',
'drop edmtestThings_*__*'
),
fileName = cms.untracked.string('file:big.root')
)
process.source = cms.Source("EmptySource")
process.p = cms.Path(process.Thing*process.OtherThing)
process.ep = cms.EndPath(process.output)
| 24.724138
| 60
| 0.737796
|
40e1cf35c61b3e7ccbce5af2e0a9cfc485e4b8c6
| 292
|
py
|
Python
|
image_enhance/equ-hist.py
|
BluceMengyu/Multithreaded-face-recognition-infrared-temperature-measurement-alarm-system-based-on-python
|
b4f9a17a4c6e913614161fdf548e69a406c8295a
|
[
"MIT"
] | 3
|
2020-11-13T07:04:13.000Z
|
2021-03-14T10:53:53.000Z
|
image_enhance/equ-hist.py
|
BluceMengyu/Multithreaded-face-recognition-infrared-temperature-measurement-alarm-system-based-on-python
|
b4f9a17a4c6e913614161fdf548e69a406c8295a
|
[
"MIT"
] | null | null | null |
image_enhance/equ-hist.py
|
BluceMengyu/Multithreaded-face-recognition-infrared-temperature-measurement-alarm-system-based-on-python
|
b4f9a17a4c6e913614161fdf548e69a406c8295a
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
import cv2
import numpy as np
from matplotlib import pyplot as plt
def main():
img = cv2.imread("4.jpg",0)
eq = cv2.equalizeHist(img)
cv2.imshow("Histogram Equalization", np.hstack([img, eq]))
cv2.waitKey(0)
if __name__ == '__main__':
main()
| 16.222222
| 62
| 0.633562
|
6d00c9f28efac5ce81a4d0349e5ab027674b8930
| 5,980
|
py
|
Python
|
sector.py
|
maytim/historic-top-gainers-and-losers
|
6446e67013197bf078185b7579ae50a465d63ad2
|
[
"MIT"
] | null | null | null |
sector.py
|
maytim/historic-top-gainers-and-losers
|
6446e67013197bf078185b7579ae50a465d63ad2
|
[
"MIT"
] | null | null | null |
sector.py
|
maytim/historic-top-gainers-and-losers
|
6446e67013197bf078185b7579ae50a465d63ad2
|
[
"MIT"
] | null | null | null |
# imports
import time
import json
import requests
from decimal import *
from datetime import date, datetime, timedelta
from heapq import nsmallest, nlargest
from timeit import default_timer
from functools import wraps
# wrapper function to report elapsed time of functions
def elapsed_time(function):
@wraps(function)
def wrapper(*args, **kwargs):
start = default_timer()
result = function(*args, **kwargs)
end = default_timer()
print(function.__name__, '%.2f seconds' % (end-start))
return result
return wrapper
class sector:
# Constructor
# Accepts a dictionary of the sector mapping the industry IDs to industry names
def __init__(self,sector_dict, start, end):
# initialize returns data
self.initialize_returns(start,end)
# get a list of the keys (IDs) of the industries
industry_ids = list(sector_dict.keys())
# load the industry data
self.data_scope = self.get_industry_data(industry_ids)
# add the names to the industries
self.name_industries(sector_dict)
# load returns data
self.get_returns_data(start,end)
def get_industry_data(self, industry_list, result=None):
result = {}
query_data = self.yahoo_finance_industry_query(industry_list)
# set up the sector by grabbing the companies for each industry
for industry in query_data['query']['results']['industry']:
id = industry['id']
result[id] = {}
result[id]['name'] = ""
result[id]['companies'] = []
result[id]['quotes'] = {}
if 'company' in industry:
for company in industry['company']:
result[id]['companies'].append(company['symbol'])
return result
def yahoo_finance_query(self, query):
query_start = 'https://query.yahooapis.com/v1/public/yql?q='
query_end = '&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback='
url = query_start + query + query_end
return requests.get(url).json()
def yahoo_finance_industry_query(self,industries):
# generate the list of industries for the yahoo query
query = '('
for i in industries:
query += str(i)+'%2C'
query = query[:-3] + ')'
return self.yahoo_finance_query('select%20*%20from%20yahoo.finance.industry%20where%20id%20in%20'+query)
def yahoo_finance_historical_query(self, company, start, end):
query_1 = "select%20*%20from%20yahoo.finance.historicaldata%20where%20symbol%20%3D%20%22"
query_2 = "%22%20and%20startDate%20%3D%20%22"
query_3 = "%22%20and%20endDate%20%3D%20%22"
query_4 = "%22"
return self.yahoo_finance_query(query_1 + company + query_2 + start + query_3 + end + query_4)
# a quick function to label the industries in the sector
def name_industries(self, sector_dict):
for key in self.data_scope:
self.data_scope[key]['name'] = sector_dict[int(key)]
# a quick function to save the scraped data to a .txt
def save_to_txt(self, filename):
pass
### TODO
#with open(str(filename)+'.txt','w') as outfile:
#json.dump(self.data,outfile)
@elapsed_time
def get_returns_data(self, start, end):
# get each industry within the sector database
for key in self.data_scope:
# get the industry data with the key
industry = self.data_scope[key]
# get the list of companies within each industry
for company in industry['companies']:
raw_data = self.yahoo_finance_historical_query(company,start,end)
# check that the quote data was correctly loaded
                # a possibility for an error could be lack of data for stock X in the given timeframe
if raw_data['query']['results'] is not None:
# parse the 'raw_data' to get the quotes for each day
for data in raw_data['query']['results']['quote']:
                        # when a single quote is returned, don't iterate through the list
if isinstance(data,str):
data = raw_data['query']['results']['quote']
# cache the relevant data
date = data['Date']
price_open = data['Open']
price_close = data['Close']
# calculate the daily return
# Note: used Decimal() in order to get precise results that can be rounded to 2 places
price_return = round((Decimal(price_close)-Decimal(price_open))/Decimal(price_open) * 100,2)
# check if return needs to be generated for date and then add company
if price_return not in self.returns[date]:
self.returns[date][price_return] = []
self.returns[date][price_return].append(company)
# helper function to generate empty dictionaries of dates (designed for gainers / losers)
@elapsed_time
def initialize_returns(self,start,end):
# takes dates (start, stop) in format '%Y-%m-%d'
start_date = datetime.strptime(start,'%Y-%m-%d')
end_date = datetime.strptime(end,'%Y-%m-%d')
self.returns = {}
# iterate through dates from start to end
current = start_date
while current <= end_date:
# create keys in the dict for each day initialized with []
self.returns[current.strftime('%Y-%m-%d')] = {}
current += timedelta(days=1)
@elapsed_time
def get_gainers(self,date,count):
return nlargest(count, self.returns[date].items())
@elapsed_time
def get_losers(self,date,count):
return nsmallest(count, self.returns[date].items())
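# --- Illustrative usage sketch (not part of the original module) ---
# The industry IDs/names below are hypothetical placeholders; real IDs come
# from Yahoo Finance's (since retired) YQL industry tables, so this only shows
# the call shape and is not expected to succeed against the live service.
def _example_usage():
    tech = {112: "Software", 811: "Semiconductors"}
    s = sector(tech, "2017-01-02", "2017-01-06")
    return s.get_gainers("2017-01-03", 5), s.get_losers("2017-01-03", 5)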
| 41.818182
| 116
| 0.611371
|
e9ac22d9296c11c1a97be98d43bdfea01b851d9a
| 27,429
|
py
|
Python
|
ginga/gtkw/Widgets.py
|
eteq/ginga
|
4a9b9bc05c8bb212a3fb90e077ca20c6bee1ffd9
|
[
"BSD-3-Clause"
] | null | null | null |
ginga/gtkw/Widgets.py
|
eteq/ginga
|
4a9b9bc05c8bb212a3fb90e077ca20c6bee1ffd9
|
[
"BSD-3-Clause"
] | null | null | null |
ginga/gtkw/Widgets.py
|
eteq/ginga
|
4a9b9bc05c8bb212a3fb90e077ca20c6bee1ffd9
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Widgets.py -- wrapped Gtk widgets and convenience functions
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.gtkw import GtkHelp, gtksel, GtkMain
import gtk
import gobject
from ginga.misc import Callback, Bunch
from functools import reduce
class WidgetError(Exception):
"""For errors thrown in this module."""
pass
# BASE
class WidgetBase(Callback.Callbacks):
def __init__(self):
super(WidgetBase, self).__init__()
self.widget = None
def get_widget(self):
return self.widget
def set_tooltip(self, text):
self.widget.set_tooltip_text(text)
def set_enabled(self, tf):
self.widget.set_sensitive(tf)
# BASIC WIDGETS
class TextEntry(WidgetBase):
def __init__(self, text=''):
super(TextEntry, self).__init__()
w = gtk.Entry()
w.set_text(text)
w.connect('activate', self._cb_redirect)
self.widget = w
self.enable_callback('activated')
def _cb_redirect(self, *args):
self.make_callback('activated')
def get_text(self):
return self.widget.get_text()
def set_text(self, text):
self.widget.set_text(text)
def set_length(self, numchars):
# this only sets the visible length of the widget
self.widget.set_width_chars(numchars)
class TextEntrySet(WidgetBase):
def __init__(self, text=''):
super(TextEntrySet, self).__init__()
hbox = gtk.HBox()
hbox.set_spacing(4)
w = gtk.Entry()
w.set_text(text)
hbox.pack_start(w, fill=True)
w.connect('activate', self._cb_redirect)
self.entry = w
w = gtk.Button('Set')
w.connect('clicked', self._cb_redirect)
hbox.pack_start(w, fill=False)
self.btn = w
self.widget = hbox
self.enable_callback('activated')
def _cb_redirect(self, *args):
self.make_callback('activated')
def get_text(self):
return self.entry.get_text()
def set_text(self, text):
self.entry.set_text(text)
def set_length(self, numchars):
#self.widget.set_width_chars(numchars)
pass
class TextArea(WidgetBase):
def __init__(self, wrap=False, editable=False):
super(TextArea, self).__init__()
tw = gtk.TextView()
if wrap:
tw.set_wrap_mode(gtk.WRAP_WORD)
else:
tw.set_wrap_mode(gtk.WRAP_NONE)
tw.set_editable(editable)
self.widget = tw
self.histlimit = 0
def append_text(self, text, autoscroll=True):
buf = self.widget.get_buffer()
end = buf.get_end_iter()
buf.insert(end, text)
if self.histlimit > 0:
self._history_housekeeping()
if not autoscroll:
return
end = buf.get_end_iter()
mark = buf.get_insert()
#self.widget.scroll_to_iter(end, 0.5)
# NOTE: this was causing a segfault if the text widget is
# not mapped yet! Seems to be fixed in recent versions of
# gtk
buf.move_mark(mark, end)
res = self.widget.scroll_to_mark(mark, 0.2, True)
def get_text(self):
buf = self.widget.get_buffer()
return buf.get_text()
def _history_housekeeping(self):
# remove some lines to keep us within our history limit
buf = self.widget.get_buffer()
numlines = buf.get_line_count()
if numlines > self.histlimit:
rmcount = int(numlines - self.histlimit)
start = buf.get_iter_at_line(0)
end = buf.get_iter_at_line(rmcount)
buf.delete(start, end)
def clear(self):
buf = self.widget.get_buffer()
start = buf.get_start_iter()
end = buf.get_end_iter()
buf.delete(start, end)
def set_text(self, text):
self.clear()
self.append_text(text)
def set_limit(self, numlines):
self.histlimit = numlines
self._history_housekeeping()
def set_font(self, font):
self.widget.modify_font(font)
def set_wrap(self, tf):
if tf:
self.widget.set_wrap_mode(gtk.WRAP_WORD)
else:
self.widget.set_wrap_mode(gtk.WRAP_NONE)
class Label(WidgetBase):
def __init__(self, text=''):
super(Label, self).__init__()
self.widget = gtk.Label(text)
def get_text(self):
return self.widget.get_text()
def set_text(self, text):
self.widget.set_text(text)
class Button(WidgetBase):
def __init__(self, text=''):
super(Button, self).__init__()
w = gtk.Button(text)
self.widget = w
w.connect('clicked', self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, *args):
self.make_callback('activated')
class ComboBox(WidgetBase):
def __init__(self, editable=False):
super(ComboBox, self).__init__()
if editable:
cb = GtkHelp.ComboBoxEntry()
else:
cb = GtkHelp.ComboBox()
liststore = gtk.ListStore(gobject.TYPE_STRING)
cb.set_model(liststore)
cell = gtk.CellRendererText()
cb.pack_start(cell, True)
cb.add_attribute(cell, 'text', 0)
self.widget = cb
self.widget.sconnect('changed', self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, widget):
idx = widget.get_active()
self.make_callback('activated', idx)
def insert_alpha(self, text):
model = self.widget.get_model()
tup = (text, )
j = 0
for i in range(len(model)):
j = i
if model[i][0] > text:
model.insert(j, tup)
return
model.insert(j+1, tup)
def append_text(self, text):
model = self.widget.get_model()
tup = (text, )
idx = len(model)
model.insert(idx, tup)
def insert_text(self, idx, text):
model = self.widget.get_model()
tup = (text, )
model.insert(idx, tup)
def delete_alpha(self, text):
model = self.widget.get_model()
for i in range(len(model)):
if model[i][0] == text:
del model[i]
return
def clear(self):
model = self.widget.get_model()
model.clear()
def show_text(self, text):
model = self.widget.get_model()
for i in range(len(model)):
if model[i][0] == text:
self.widget.set_active(i)
return
def set_index(self, index):
self.widget.set_active(index)
def get_index(self):
return self.widget.get_active()
class SpinBox(WidgetBase):
def __init__(self, dtype=int):
super(SpinBox, self).__init__()
self.widget = GtkHelp.SpinButton()
# if not gtksel.have_gtk3:
# self.widget.set_update_policy(gtk.UPDATE_DISCONTINUOUS)
self.widget.sconnect('value-changed', self._cb_redirect)
self.enable_callback('value-changed')
def _cb_redirect(self, w):
val = w.get_value()
self.make_callback('value-changed', val)
def get_value(self):
return self.widget.get_value()
def set_value(self, val):
self.widget.set_value(val)
def set_decimals(self, num):
self.widget.set_digits(num)
def set_limits(self, minval, maxval, incr_value=1):
adj = self.widget.get_adjustment()
adj.configure(minval, minval, maxval, incr_value, incr_value, 0)
class Slider(WidgetBase):
def __init__(self, orientation='horizontal', track=False):
super(Slider, self).__init__()
if orientation == 'horizontal':
w = GtkHelp.HScale()
# TEMP: hack because scales don't seem to expand as expected
w.set_size_request(200, -1)
else:
w = GtkHelp.VScale()
w.set_size_request(-1, 200)
self.widget = w
w.set_draw_value(True)
w.set_value_pos(gtk.POS_BOTTOM)
self.set_tracking(track)
w.sconnect('value-changed', self._cb_redirect)
self.enable_callback('value-changed')
def _cb_redirect(self, range):
val = range.get_value()
self.make_callback('value-changed', val)
def get_value(self):
return self.widget.get_value()
def set_value(self, val):
self.widget.set_value(val)
def set_tracking(self, tf):
if tf:
self.widget.set_update_policy(gtk.UPDATE_CONTINUOUS)
else:
self.widget.set_update_policy(gtk.UPDATE_DISCONTINUOUS)
def set_limits(self, minval, maxval, incr_value=1):
adj = self.widget.get_adjustment()
adj.configure(minval, minval, maxval, incr_value, incr_value, 0)
class ScrollBar(WidgetBase):
def __init__(self, orientation='horizontal'):
super(ScrollBar, self).__init__()
if orientation == 'horizontal':
self.widget = gtk.HScrollbar()
else:
self.widget = gtk.VScrollbar()
self.widget.connect('value-changed', self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, range):
val = range.get_value()
self.make_callback('activated', val)
class CheckBox(WidgetBase):
def __init__(self, text=''):
super(CheckBox, self).__init__()
self.widget = GtkHelp.CheckButton(text)
self.widget.sconnect('toggled', self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, widget):
val = widget.get_active()
self.make_callback('activated', val)
def set_state(self, tf):
self.widget.set_active(tf)
def get_state(self):
return self.widget.get_active()
class ToggleButton(WidgetBase):
def __init__(self, text=''):
super(ToggleButton, self).__init__()
w = GtkHelp.ToggleButton(text)
w.set_mode(True)
self.widget = w
self.widget.sconnect('toggled', self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, widget):
val = widget.get_active()
self.make_callback('activated', val)
def set_state(self, tf):
self.widget.set_active(tf)
def get_state(self):
return self.widget.get_active()
class RadioButton(WidgetBase):
def __init__(self, text='', group=None):
super(RadioButton, self).__init__()
if group is not None:
group = group.get_widget()
self.widget = GtkHelp.RadioButton(group, text)
self.widget.connect('toggled', self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, widget):
val = widget.get_active()
self.make_callback('activated', val)
def set_state(self, tf):
self.widget.set_active(tf)
def get_state(self):
return self.widget.get_active()
class ProgressBar(WidgetBase):
def __init__(self):
super(ProgressBar, self).__init__()
w = gtk.ProgressBar()
# GTK3
#w.set_orientation(gtk.ORIENTATION_HORIZONTAL)
#w.set_inverted(False)
self.widget = w
def set_value(self, pct):
pct = float(pct)
self.widget.set_fraction(pct)
self.widget.set_text("%.2f %%" % (pct * 100.0))
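# --- Illustrative sketch (not part of the original module) ---
# Hypothetical helper composing a few of the wrappers in this file. It relies
# on container and top-level classes defined further down in the module, which
# is fine because nothing here runs at import time; callbacks registered via
# add_callback() receive the widget wrapper as their first argument.
def _example_counter_window():
    win = TopLevel(title="Counter")
    vbox = VBox()
    label = Label("0")
    btn = Button("Increment")

    def _on_increment(button):
        label.set_text(str(int(label.get_text()) + 1))

    btn.add_callback('activated', _on_increment)
    vbox.add_widget(label, stretch=0)
    vbox.add_widget(btn, stretch=0)
    win.set_widget(vbox)
    win.show()
    return win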
# CONTAINERS
class ContainerBase(WidgetBase):
def __init__(self):
super(ContainerBase, self).__init__()
self.children = []
def add_ref(self, ref):
# TODO: should this be a weakref?
self.children.append(ref)
def _remove(self, childw):
self.widget.remove(childw)
def remove(self, w):
        if w not in self.children:
raise KeyError("Widget is not a child of this container")
self.children.remove(w)
self._remove(w.get_widget())
def remove_all(self):
for w in list(self.children):
self.remove(w)
def get_children(self):
return self.children
class Box(ContainerBase):
def __init__(self, orientation='horizontal'):
super(Box, self).__init__()
if orientation == 'horizontal':
self.widget = gtk.HBox()
else:
self.widget = gtk.VBox()
def set_spacing(self, val):
self.widget.set_spacing(val)
def set_margins(self, left, right, top, bottom):
# TODO: can this be made more accurate?
self.widget.set_border_width(left)
def set_border_width(self, pix):
self.widget.set_border_width(pix)
def add_widget(self, child, stretch=0.0):
self.add_ref(child)
child_w = child.get_widget()
# TODO: can this be made more accurate?
expand = (float(stretch) != 0.0)
self.widget.pack_start(child_w, expand=expand, fill=True)
self.widget.show_all()
class VBox(Box):
def __init__(self):
super(VBox, self).__init__(orientation='vertical')
class HBox(Box):
def __init__(self):
super(HBox, self).__init__(orientation='horizontal')
class Frame(ContainerBase):
def __init__(self, title=None):
super(Frame, self).__init__()
fr = gtk.Frame(label=title)
fr.set_shadow_type(gtk.SHADOW_ETCHED_IN)
fr.set_label_align(0.10, 0.5)
self.widget = fr
def set_widget(self, child):
self.remove_all()
self.add_ref(child)
self.widget.add(child.get_widget())
self.widget.show_all()
class Expander(ContainerBase):
def __init__(self, title=None):
super(Expander, self).__init__()
w = gtk.Expander(label=title)
self.widget = w
def set_widget(self, child):
self.remove_all()
self.add_ref(child)
self.widget.add(child.get_widget())
self.widget.show_all()
class TabWidget(ContainerBase):
def __init__(self, tabpos='top'):
super(TabWidget, self).__init__()
nb = gtk.Notebook()
nb.set_show_border(False)
nb.set_scrollable(True)
if tabpos == 'top':
nb.set_tab_pos(gtk.POS_TOP)
elif tabpos == 'bottom':
nb.set_tab_pos(gtk.POS_BOTTOM)
elif tabpos == 'left':
nb.set_tab_pos(gtk.POS_LEFT)
elif tabpos == 'right':
nb.set_tab_pos(gtk.POS_RIGHT)
nb.connect("switch-page", self._cb_redirect)
self.widget = nb
self.enable_callback('activated')
def _cb_redirect(self, nbw, gptr, index):
self.make_callback('activated', index)
def add_widget(self, child, title=''):
self.add_ref(child)
child_w = child.get_widget()
label = gtk.Label(title)
self.widget.append_page(child_w, label)
self.widget.show_all()
def get_index(self):
return self.widget.get_current_page()
def set_index(self, idx):
self.widget.set_current_page(idx)
def index_of(self, child):
return self.widget.page_num(child.get_widget())
class StackWidget(TabWidget):
def __init__(self):
super(StackWidget, self).__init__()
nb = self.widget
#nb.set_scrollable(False)
nb.set_show_tabs(False)
nb.set_show_border(False)
class ScrollArea(ContainerBase):
def __init__(self):
super(ScrollArea, self).__init__()
sw = gtk.ScrolledWindow()
sw.set_border_width(2)
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.widget = sw
def set_widget(self, child):
self.remove_all()
self.add_ref(child)
self.widget.add_with_viewport(child.get_widget())
self.widget.show_all()
class Splitter(ContainerBase):
def __init__(self, orientation='horizontal'):
super(Splitter, self).__init__()
self.orientation = orientation
if orientation == 'horizontal':
w = gtk.HPaned()
else:
w = gtk.VPaned()
self.widget = w
def add_widget(self, child):
self.add_ref(child)
child_w = child.get_widget()
if len(self.children) == 1:
#self.widget.pack1(child_w, resize=True, shrink=True)
self.widget.pack1(child_w)
else:
self.widget.pack2(child_w)
self.widget.show_all()
class GridBox(ContainerBase):
def __init__(self, rows=1, columns=1):
super(GridBox, self).__init__()
w = gtk.Table(rows=rows, columns=columns)
self.widget = w
def set_row_spacing(self, val):
self.widget.set_row_spacings(val)
def set_column_spacing(self, val):
self.widget.set_col_spacings(val)
def add_widget(self, child, row, col, stretch=0):
self.add_ref(child)
w = child.get_widget()
if stretch > 0:
xoptions = gtk.EXPAND|gtk.FILL
else:
xoptions = gtk.FILL
self.widget.attach(w, col, col+1, row, row+1, xoptions=xoptions)
self.widget.show_all()
class Toolbar(ContainerBase):
def __init__(self, orientation='horizontal'):
super(Toolbar, self).__init__()
w = gtk.Toolbar()
w.set_style(gtk.TOOLBAR_ICONS)
if orientation == 'horizontal':
w.set_orientation(gtk.ORIENTATION_HORIZONTAL)
else:
w.set_orientation(gtk.ORIENTATION_VERTICAL)
self.widget = w
def add_action(self, text, toggle=False, iconpath=None):
if toggle:
child = ToggleButton(text)
else:
child = Button(text)
if iconpath is not None:
pixbuf = gtksel.pixbuf_new_from_file_at_size(iconpath, 24, 24)
if pixbuf is not None:
image = gtk.image_new_from_pixbuf(pixbuf)
child.get_widget().set_image(image)
self.add_widget(child)
return child
def add_widget(self, child):
self.add_ref(child)
w = child.get_widget()
self.widget.append_widget(w, None, None)
def add_separator(self):
self.widget.append_space()
class MenuAction(WidgetBase):
def __init__(self, text=None):
super(MenuAction, self).__init__()
self.text = text
self.widget = gtk.MenuItem(label=text)
self.widget.show()
self.widget.connect('activate', self._cb_redirect)
self.enable_callback('activated')
def _cb_redirect(self, *args):
# TODO: checkable menu items
self.make_callback('activated')
class Menu(ContainerBase):
def __init__(self):
super(Menu, self).__init__()
self.widget = gtk.Menu()
self.widget.show()
def add_widget(self, child):
menuitem_w = child.get_widget()
self.widget.append(menuitem_w)
self.add_ref(child)
#self.widget.show_all()
def add_name(self, name):
child = MenuAction(text=name)
self.add_widget(child)
return child
def add_separator(self):
sep = gtk.SeparatorMenuItem()
self.widget.append(sep)
sep.show()
class Menubar(ContainerBase):
def __init__(self):
super(Menubar, self).__init__()
self.widget = gtk.MenuBar()
def add_widget(self, child):
menu_w = child.get_widget()
self.widget.addMenu(menu_w)
self.add_ref(child)
menu_w.show()
return child
def add_name(self, name):
item_w = gtk.MenuItem(label=name)
child = Menu()
self.add_ref(child)
item_w.set_submenu(child.get_widget())
self.widget.append(item_w)
item_w.show()
return child
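# Illustrative usage sketch (not part of the original module; the menu labels
# are made up, and add_callback() is assumed to be provided by the Callbacks
# machinery that WidgetBase builds on):
#     menubar = Menubar()
#     filemenu = menubar.add_name("File")      # returns a Menu child
#     quit_item = filemenu.add_name("Quit")    # returns a MenuAction
#     quit_item.add_callback('activated', lambda w: print("Quit selected"))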
class TopLevel(ContainerBase):
def __init__(self, title=None):
super(TopLevel, self).__init__()
widget = GtkHelp.TopLevel()
self.widget = widget
widget.set_border_width(0)
widget.connect("destroy", self._quit)
widget.connect("delete_event", self._closeEvent)
        if title is not None:
widget.set_title(title)
self.enable_callback('closed')
def set_widget(self, child):
self.add_ref(child)
child_w = child.get_widget()
self.widget.add(child_w)
def show(self):
self.widget.show_all()
def hide(self):
self.widget.hide()
def _quit(self, *args):
self.close()
def _closeEvent(self, widget, event):
self.close()
def close(self):
try:
self.widget.destroy()
except Exception as e:
pass
#self.widget = None
self.make_callback('closed')
def raise_(self):
window = self.widget.get_window()
## if window:
## if hasattr(window, 'present'):
## # gtk3 ?
## window.present()
## else:
## # gtk2
## window.show()
window.raise_()
def lower(self):
window = self.widget.get_window()
window.lower()
def resize(self, width, height):
self.widget.set_size_request(width, height)
def focus(self):
window = self.widget.get_window()
window.focus()
def move(self, x, y):
window = self.widget.get_window()
window.move(x, y)
def maximize(self):
window = self.widget.get_window()
window.maximize()
def unmaximize(self):
window = self.widget.get_window()
window.unmaximize()
def fullscreen(self):
window = self.widget.get_window()
window.fullscreen()
def unfullscreen(self):
window = self.widget.get_window()
window.unfullscreen()
def iconify(self):
window = self.widget.get_window()
window.iconify()
def uniconify(self):
window = self.widget.get_window()
window.deiconify()
def set_title(self, title):
self.widget.set_title(title)
class Application(GtkMain.GtkMain):
def __init__(self, *args, **kwdargs):
super(Application, self).__init__(*args, **kwdargs)
self.window_list = []
def window(self, title=None):
w = TopLevel(title=title)
self.window_list.append(w)
return w
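# Illustrative usage sketch (not part of the original module): whatever
# arguments GtkMain.GtkMain expects (e.g. a logger) are passed straight through
# by Application, so they are left unspecified here.
#     app = Application(...)
#     win = app.window(title="Demo")
#     win.set_widget(Label("Hello"))
#     win.show()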
class SaveDialog:
def __init__(self, title='Save File', selectedfilter=None):
action = gtk.FILE_CHOOSER_ACTION_SAVE
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK)
self.widget = gtk.FileChooserDialog(title=title, action=action, buttons=buttons)
self.selectedfilter = selectedfilter
if selectedfilter is not None:
self._add_filter(selectedfilter)
def _add_filter(self, selectedfilter):
filtr = gtk.FileFilter()
filtr.add_pattern(selectedfilter)
if 'png' in selectedfilter:
filtr.set_name('Image (*.png)')
self.selectedfilter = '.png'
elif 'avi' in selectedfilter:
filtr.set_name('Movie (*.avi)')
self.selectedfilter = '.avi'
elif 'npz' in selectedfilter:
filtr.set_name('Numpy Compressed Archive (*.npz)')
self.selectedfilter = '.npz'
self.widget.add_filter(filtr)
def get_path(self):
response = self.widget.run()
if response == gtk.RESPONSE_OK:
path = self.widget.get_filename()
if self.selectedfilter is not None and not path.endswith(self.selectedfilter):
path += self.selectedfilter
self.widget.destroy()
return path
elif response == gtk.RESPONSE_CANCEL:
self.widget.destroy()
return None
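# Illustrative usage (not part of the original module; the filter string below
# is just an example value):
#     dlg = SaveDialog(title='Save Image', selectedfilter='*.png')
#     path = dlg.get_path()   # filename with '.png' appended if missing, or None on cancel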
# MODULE FUNCTIONS
def name_mangle(name, pfx=''):
newname = []
for c in name.lower():
if not (c.isalpha() or c.isdigit() or (c == '_')):
newname.append('_')
else:
newname.append(c)
return pfx + ''.join(newname)
def make_widget(title, wtype):
if wtype == 'label':
w = Label(title)
w.get_widget().set_alignment(0.95, 0.5)
elif wtype == 'llabel':
w = Label(title)
w.get_widget().set_alignment(0.05, 0.95)
elif wtype == 'entry':
w = TextEntry()
#w.get_widget().set_width_chars(12)
elif wtype == 'entryset':
w = TextEntrySet()
elif wtype == 'combobox':
w = ComboBox()
elif wtype == 'spinbutton':
w = SpinBox(dtype=int)
elif wtype == 'spinfloat':
w = SpinBox(dtype=float)
elif wtype == 'vbox':
w = VBox()
elif wtype == 'hbox':
w = HBox()
elif wtype == 'hscale':
w = Slider(orientation='horizontal')
elif wtype == 'vscale':
w = Slider(orientation='vertical')
elif wtype == 'checkbutton':
w = CheckBox(title)
elif wtype == 'radiobutton':
w = RadioButton(title)
elif wtype == 'togglebutton':
w = ToggleButton(title)
elif wtype == 'button':
w = Button(title)
elif wtype == 'spacer':
w = Label('')
elif wtype == 'textarea':
w = TextArea(editable=True)
elif wtype == 'toolbar':
w = Toolbar()
elif wtype == 'menubar':
w = Menubar()
else:
raise ValueError("Bad wtype=%s" % wtype)
return w
def hadjust(w, orientation):
if orientation != 'horizontal':
return w
vbox = VBox()
vbox.add_widget(w)
vbox.add_widget(Label(''), stretch=1)
return vbox
def build_info(captions, orientation='vertical'):
vbox = gtk.VBox(spacing=2)
numrows = len(captions)
numcols = reduce(lambda acc, tup: max(acc, len(tup)), captions, 0)
if (numcols % 2) != 0:
raise ValueError("Column spec is not an even number")
numcols /= 2
table = gtk.Table(rows=numrows, columns=numcols)
table.set_row_spacings(2)
table.set_col_spacings(4)
vbox.pack_start(table, expand=False)
wb = Bunch.Bunch()
row = 0
for tup in captions:
col = 0
while col < numcols:
idx = col * 2
if idx < len(tup):
title, wtype = tup[idx:idx+2]
if not title.endswith(':'):
name = name_mangle(title)
else:
name = name_mangle('lbl_'+title[:-1])
w = make_widget(title, wtype)
table.attach(w.get_widget(), col, col+1, row, row+1,
xoptions=gtk.FILL, yoptions=gtk.FILL,
xpadding=1, ypadding=1)
wb[name] = w
col += 1
row += 1
vbox.show_all()
w = wrap(vbox)
w = hadjust(w, orientation=orientation)
return w, wb
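# Illustrative usage sketch (not part of the original module; the widget titles
# below are made up): each tuple in "captions" holds (title, wtype) pairs, two
# entries per logical column, e.g.
#     w, b = build_info([("Object:", 'label', 'Object', 'entry'),
#                        ("Go", 'button', "Status:", 'llabel')])
# returns the wrapped container "w" and a Bunch "b" keyed by the mangled widget
# names (b.lbl_object, b.object, b.go, b.lbl_status).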
def wrap(native_widget):
wrapper = WidgetBase()
wrapper.widget = native_widget
return wrapper
def get_orientation(container):
if not hasattr(container, 'size'):
return 'vertical'
(wd, ht) = container.size
if wd < ht:
return 'vertical'
else:
return 'horizontal'
def get_oriented_box(container, scrolled=True, fill=False):
orientation = get_orientation(container)
if orientation == 'vertical':
box1 = VBox()
box2 = VBox()
else:
box1 = HBox()
box2 = VBox()
box2.add_widget(box1)
if scrolled:
box2.add_widget(Label(''), stretch=1)
sw = ScrollArea()
sw.set_widget(box2)
else:
sw = box2
return box1, sw, orientation
#END
| 26.76
| 90
| 0.598673
|
9825770f6b9d32d0471ec0e3af449e86c5cce470
| 8,822
|
py
|
Python
|
clusterSimilarSSIDSets.py
|
rc1035/directed-probe-matching
|
c724096672e778202d9e8ed197cdf7395ea1d211
|
[
"MIT"
] | 10
|
2017-08-16T12:16:52.000Z
|
2022-02-26T05:09:39.000Z
|
clusterSimilarSSIDSets.py
|
d15c0/directed-probe-matching
|
c724096672e778202d9e8ed197cdf7395ea1d211
|
[
"MIT"
] | 1
|
2019-07-10T12:00:00.000Z
|
2019-07-10T12:00:00.000Z
|
clusterSimilarSSIDSets.py
|
d15c0/directed-probe-matching
|
c724096672e778202d9e8ed197cdf7395ea1d211
|
[
"MIT"
] | 4
|
2017-11-30T11:01:06.000Z
|
2019-11-03T23:39:40.000Z
|
#!/usr/bin/env python3.6
"""
Cluster together probes belonging to tokens that share similar SSID sets.
An optimal clustering is one in which every token in a cluster belongs
to the same MAC address, and every token has been clustered.
"""
__author__ = "Richard Cosgrove"
__license__ = "MIT"
from collections import defaultdict, Counter
import csv
import decimal
from functools import partial
from itertools import combinations
from pprint import pprint
import pickle
import multiprocessing
import sys
# Local imports
from utilities import import_compressed_json
from utilities import validate_clusters, match_tokens_with_same_ssid_set
def filter_false_pos_tokens_from_cluster(token_to_probes, cluster):
""" Remove any token from a cluster that does not have the most common fingerprint.
:param token_to_probes: Dictionary of token to list of probe dictionary
:param cluster: set of tokens
"""
token_to_fingerprint = {}
# First match each token to its probe's fingerprints
for token in cluster:
fingerprints = set()
fingerprints |= {probe["fingerprint"] for probe in token_to_probes[token]}
# We only care about a token if its fingerprint is stable
# i.e. it does not change.
if len(fingerprints) == 1:
token_to_fingerprint[token] = fingerprints.pop()
if not token_to_fingerprint:
# Do nothing - no token has a stable fingerprint
return cluster
# Now remove any token whose fingerprint is not consistent with the
# most common fingerprint.
most_common_fingerprint = Counter(token_to_fingerprint.values()).most_common(1)[0][0]
return cluster - {token for token in token_to_fingerprint.keys()
if token_to_fingerprint[token] != most_common_fingerprint}
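# Illustrative sketch (not part of the original function; token and fingerprint
# names are made up): for a cluster {"t1", "t2", "t3"} where t1 and t2 always
# probe with fingerprint "fpA" and t3 with "fpB", the most common stable
# fingerprint is "fpA", so t3 is removed.  A token whose fingerprint varies
# between probes never enters token_to_fingerprint and is left in the cluster.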
def cluster(token_to_probes, ssid_set_to_tokens, ssid_set_to_matches,
token_to_ssid_set, check_fingerprints):
"""
:param token_to_probes: Dictionary of token to list of probe dictionary
:param ssid_set_to_tokens: Dictionary of SSID set to set of tokens
:param ssid_set_to_matches: Dictionary of SSID set to set of SSID set
:param token_to_ssid_set: Dictionary of token to SSID set
:yields: a cluster upon one being generated
"""
tokens_remaining = set(token_to_ssid_set.keys())
while tokens_remaining:
cluster = set()
tokens_to_match = set()
# Start cluster with arbitrary token
start_token = tokens_remaining.pop()
tokens_to_match.add(start_token)
# Keep looping until no more tokens can be found on stack
# This is much faster than recursion
while tokens_to_match:
# Add token to cluster and remove from tokens_to_match stack
token = tokens_to_match.pop()
cluster.add(token)
tokens_remaining.discard(token)
# Get SSID set belonging to token
ssid_set = token_to_ssid_set[token]
# Get tokens with the same SSID set that have not yet been clustered
tokens_to_match |= {token
for token in ssid_set_to_tokens[ssid_set]
if token in tokens_remaining
}
# Get SSID sets "similar" to this SSID set
similar_ssid_sets = ssid_set_to_matches[ssid_set]
# Get tokens with similar SSID set that have not yet been clustered
tokens_to_match |= {token
for matched_ssid_set in similar_ssid_sets
for token in ssid_set_to_tokens[matched_ssid_set]
if token in tokens_remaining
}
if check_fingerprints:
cluster = filter_false_pos_tokens_from_cluster(token_to_probes, cluster)
yield cluster
def jaccard_worker(chunk, threshold):
intersection_cardinality = len(chunk[0].intersection(chunk[1]))
union_cardinality = len(chunk[0]) + len(chunk[1]) - intersection_cardinality
if intersection_cardinality / float(union_cardinality) >= threshold:
return chunk
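# Illustrative worked example (not part of the original module; the SSID names
# are made up): the Jaccard index is |A & B| / |A | B|.  For
#     A = frozenset({"eduroam", "CafeWiFi", "HomeNet", "Airport_Free"})
#     B = frozenset({"eduroam", "CafeWiFi", "HomeNet"})
# the intersection has 3 elements and the union has 4, giving 0.75, which is
# above the default 0.67 threshold used in main(), so (A, B) would be returned
# as a similar pair.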
def single_processor_get_similar_ssid_sets(ssid_sets, threshold):
ssid_set_to_matches = defaultdict(set)
ssid_pairs = combinations(ssid_sets, r=2)
for pair in ssid_pairs:
match = jaccard_worker(pair, threshold)
if match:
ssid_set_to_matches[match[0]].add(match[1])
ssid_set_to_matches[match[1]].add(match[0])
return ssid_set_to_matches
def get_similar_ssid_sets(ssid_sets, threshold):
"""Return a mapping of ssid set to similar ssid sets.
:param ssid_sets: Iterable of SSID sets
:param threshold: Minimum Jaccard index for two sets to be matched as similar.
"""
ssid_set_to_matches = defaultdict(set)
ssid_pairs = combinations(ssid_sets, r=2)
    # Distribute calculations to worker processes
# Significant speed-up over single process
with multiprocessing.Pool() as pool:
task = partial(jaccard_worker, threshold=threshold)
# Immediately returns an iterable
similar_ssids = pool.imap_unordered(task, ssid_pairs, chunksize=300000)
# Consumes the iterable whenever a worker process yields
for match in similar_ssids:
if match:
ssid_set_to_matches[match[0]].add(match[1])
ssid_set_to_matches[match[1]].add(match[0])
return ssid_set_to_matches
def cluster_with_threshold(token_to_probes, threshold, check_fingerprints):
"""
:param token_to_probes: Dictionary of token to list of probe dictionary
:param threshold: Minimum Jaccard index for two sets to be matched as similar.
:param check_fingerprints: Optional step to remove false positives.
:returns: Dictionary of binary classification results (true pos, false pos, etc.)
"""
print("Matching tokens with the same SSID set.")
ssid_set_to_tokens, token_to_ssid_set = match_tokens_with_same_ssid_set(token_to_probes)
print("Matching SSID sets with a Jaccard similarity index greater than", threshold)
# ssid_set_to_matches = get_similar_ssid_sets(ssid_set_to_tokens.keys(), threshold)
ssid_set_to_matches = single_processor_get_similar_ssid_sets(ssid_set_to_tokens.keys(), threshold)
print("Clustering tokens with similar SSID sets.")
clusters = cluster(token_to_probes, ssid_set_to_tokens, ssid_set_to_matches,
token_to_ssid_set, check_fingerprints)
if check_fingerprints:
print("Filtering false positive matchings from cluster by comparing device fingerprints.")
return validate_clusters(clusters, token_to_probes)
def write_results_at_various_thresholds(token_to_probes, check_fingerprints, increment_threshold_by=0.01):
"""Output to CSV results at various thresholds. Used to draw ROC curve.
:param token_to_probes: Dictionary of token to list of probe dictionary
:param check_fingerprints: Optional step to remove false positives.
"""
def drange(x, y, jump):
"""Because python doesn't support decimal steps..."""
while x <= y:
yield float(x)
x += decimal.Decimal(jump)
with open("jaccard_threshold_results.csv", "w") as f:
writer = csv.DictWriter(f, fieldnames=["tp", "fp", "tn", "fn", "tpr", "fpr", "accuracy", "clusters", "macs", "median"])
writer.writeheader()
for threshold in drange(0, 1.01, increment_threshold_by):
writer.writerow(cluster_with_threshold(token_to_probes, threshold, check_fingerprints))
def main(test_various_thresholds=False, check_fingerprints=False):
"""Cluster with default similarity threshold.
:param test_various_thresholds: Flag to be used when generating CSV for ROC curve.
:param check_fingerprints: Optional step to remove false positives.
"""
if check_fingerprints:
token_to_probes = import_compressed_json("int/token_to_probe_inc_fingerprint.json.gz")
else:
token_to_probes = import_compressed_json("int/token_to_probe.json.gz")
if test_various_thresholds:
write_results_at_various_thresholds(token_to_probes, check_fingerprints)
else:
# Use optimal threshold
results = cluster_with_threshold(token_to_probes, 0.67, check_fingerprints)
pprint(results)
return results
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--csv", help="Generate CSV of results at various thresholds.",
action="store_true")
parser.add_argument("--fingerprint", help="Check clusters against fingerprints.",
action="store_true")
args = parser.parse_args()
main(test_various_thresholds=args.csv, check_fingerprints=args.fingerprint)
| 41.41784
| 127
| 0.701428
|
941dd05ed686635b96c8007ea098060bcc9f7cc0
| 3,464
|
py
|
Python
|
tests/test_williams_percent_r.py
|
dibyajyotidash/https-github.com-kylejusticemagnuson-pyti
|
08532970f9d2b163f1223599e3ac80f6c51533e4
|
[
"MIT"
] | 635
|
2017-04-04T20:24:47.000Z
|
2022-03-28T16:00:23.000Z
|
tests/test_williams_percent_r.py
|
dibyajyotidash/https-github.com-kylejusticemagnuson-pyti
|
08532970f9d2b163f1223599e3ac80f6c51533e4
|
[
"MIT"
] | 24
|
2017-10-22T15:01:54.000Z
|
2021-01-30T19:51:00.000Z
|
tests/test_williams_percent_r.py
|
dibyajyotidash/https-github.com-kylejusticemagnuson-pyti
|
08532970f9d2b163f1223599e3ac80f6c51533e4
|
[
"MIT"
] | 183
|
2017-07-01T16:06:39.000Z
|
2022-03-07T23:29:11.000Z
|
from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import williams_percent_r
class TestWilliamsPercentR(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.data = SampleData().get_sample_close_data()
self.wpr_expected = [-33.083683607183765, -25.112724493695076,
-23.821169277798969, -19.724875811998512, -21.352693924340887,
-19.915934275888464, -17.990064959877692, -15.697363393198287,
-17.760794803209787, -19.79365685899883, -15.353458158196428,
-13.641574321742469, -15.590370653419928, -21.276270538784889,
-20.091708062667145, -31.004967520061172, -33.832632785632384,
-45.517768437141804, -43.95873137179975, -54.474589224302605,
-54.573939625525384, -45.747038593809705, -35.384027512418761,
-38.173481085212103, -42.44554833779133, -43.362628964463127,
-38.777225831104317, -38.930072602216313, -45.678257546809306,
-37.890714558654977, -42.613679786014508, -46.297286969812781,
-63.064577760794784, -48.903324417271683, -42.376767290791015,
-23.041650745127988, -18.158196408100867, -25.762323270920923,
-41.757737867787526, -40.924722965227318, -36.163546045089831,
-23.125716469239578, -19.747802827665286, -12.365303782957632,
-14.054260603744734, -10.424149789835678, -5.4948414214750123, -0.0,
-8.9262514329385247, -10.783339701948805, -6.6106228505922626,
-10.890332441727168, -22.086358425678238, -23.798242262132195,
-24.195643867023282, -18.318685517768454, -19.999999999999964,
-16.484524264424948, -26.771111960259812, -24.959877722583084,
-26.373710355368729, -25.181505540695483, -27.02330913259458,
-24.21092854413455, -25.296140619029433, -19.625525410775744,
-19.113488727550621, -25.288498280473842, -15.880779518532648,
-15.124188001528438, -23.469621704241522, -27.481849445930479,
-30.836836071837993, -28.865112724493674, -26.373710355368729,
-34.604508979747777, -35.934275888421837, -28.21551394726783,
-36.117692013756198, -25.143293847917441, -21.207489491784482,
-21.184562476117712, -29.705769965609473, -33.886129155521608,
-35.070691631639264, -33.488727550630529, -30.508215513947228,
-32.495223538402733, -33.962552541077606, -32.204814673290009,
-29.919755445166196, -29.644631257164662, -27.581199847153247,
-25.212074894917848, -23.17157050057321, -26.404279709591094,
-22.758884218570856, -21.925869316010736, -21.047000382116895,
-20.825372564004581, -21.597248758119974, -23.316774933129526,
-22.017577378677917, -29.415361100496757, -28.139090561711917,
-27.22200993504012, -26.595338173481046, -33.931983186855149,
-53.419946503630086, -56.377531524646521, -59.679021780664897,
-59.778372181887676, -58.433320596102419, -62.155139472678655,
-60.114635078333933, -62.919373328238457, -63.079862437905966,
-76.507451280091715, -76.377531524646571, -81.207489491784472,
-78.891860909438279, -82.949942682460787, -90.149025601834182,
-98.188765762323271, -96.881925869315978, -100.0, -95.903706534199458]
def test_wpr(self):
wpr = williams_percent_r.williams_percent_r(self.data)
np.testing.assert_array_equal(wpr, self.wpr_expected)
| 57.733333
| 78
| 0.720554
|
82b962fb943bba5f6f7a4f97a974b3ffb8e871e4
| 4,583
|
py
|
Python
|
tensortrade/instruments/quantity.py
|
cihilt/tensortrade
|
47b8f2f043d3cc430838aac02a915ab42dcc7b64
|
[
"Apache-2.0"
] | 7
|
2020-09-28T23:36:40.000Z
|
2022-02-22T02:00:32.000Z
|
tensortrade/instruments/quantity.py
|
cihilt/tensortrade
|
47b8f2f043d3cc430838aac02a915ab42dcc7b64
|
[
"Apache-2.0"
] | 4
|
2020-11-13T18:48:52.000Z
|
2022-02-10T01:29:47.000Z
|
tensortrade/instruments/quantity.py
|
cihilt/tensortrade
|
47b8f2f043d3cc430838aac02a915ab42dcc7b64
|
[
"Apache-2.0"
] | 3
|
2020-11-23T17:31:59.000Z
|
2021-04-08T10:55:03.000Z
|
import operator
import warnings
from typing import Union
from numbers import Number
from tensortrade.base.exceptions import InvalidNegativeQuantity, IncompatibleInstrumentOperation, \
InvalidNonNumericQuantity, QuantityOpPathMismatch
class Quantity:
"""An size of a financial instrument for use in trading."""
def __init__(self, instrument: 'Instrument', size: float = 0, path_id: str = None):
if size < 0:
raise InvalidNegativeQuantity(size)
self._size = round(size, instrument.precision)
self._instrument = instrument
self._path_id = path_id
@property
def size(self) -> float:
return self._size
@size.setter
def size(self, size: float):
self._size = size
@property
def instrument(self) -> 'Instrument':
return self._instrument
@instrument.setter
    def instrument(self, instrument: 'Instrument'):
raise ValueError("You cannot change a Quantity's Instrument after initialization.")
@property
def path_id(self) -> str:
return self._path_id
@path_id.setter
def path_id(self, path_id: str):
self._path_id = path_id
@property
def is_locked(self) -> bool:
return bool(self._path_id)
def lock_for(self, path_id: str):
self._path_id = path_id
@staticmethod
def _bool_operation(left: Union['Quantity', float, int],
right: Union['Quantity', float, int],
bool_op: operator) -> bool:
right_size = right
if isinstance(right, Quantity):
if left.instrument != right.instrument:
raise IncompatibleInstrumentOperation(left, right)
right_size = right.size
if not isinstance(right_size, Number):
raise InvalidNonNumericQuantity(right_size)
boolean = bool_op(left.size, right_size)
if not isinstance(boolean, bool):
raise Exception("`bool_op` cannot return a non-bool type ({}).".format(boolean))
return boolean
@staticmethod
def _math_operation(left: Union['Quantity', float, int],
right: Union['Quantity', float, int],
op: operator) -> 'Quantity':
right_size = right
if isinstance(right, Quantity):
if left.instrument != right.instrument:
raise IncompatibleInstrumentOperation(left, right)
if left.path_id and right.path_id:
if left._path_id != right._path_id:
raise QuantityOpPathMismatch(left.path_id, right.path_id)
right_size = right.size
if not isinstance(right_size, Number):
raise InvalidNonNumericQuantity(right_size)
size = op(left.size, right_size)
return Quantity(instrument=left.instrument, size=size, path_id=left.path_id)
def __add__(self, other: Union['Quantity', float, int]) -> 'Quantity':
return Quantity._math_operation(self, other, operator.add)
def __sub__(self, other: Union['Quantity', float, int]) -> 'Quantity':
return Quantity._math_operation(self, other, operator.sub)
def __iadd__(self, other: Union['Quantity', float, int]) -> 'Quantity':
return Quantity._math_operation(self, other, operator.iadd)
def __isub__(self, other: Union['Quantity', float, int]) -> 'Quantity':
return Quantity._math_operation(self, other, operator.isub)
def __mul__(self, other: Union['Quantity', float, int]) -> 'Quantity':
return Quantity._math_operation(self, other, operator.mul)
def __truediv__(self, other: Union['Quantity', float, int]) -> 'Quantity':
return Quantity._math_operation(self, other, operator.truediv)
def __lt__(self, other: Union['Quantity', float, int]) -> bool:
return Quantity._bool_operation(self, other, operator.lt)
def __gt__(self, other: Union['Quantity', float, int]) -> bool:
return Quantity._bool_operation(self, other, operator.gt)
def __eq__(self, other: Union['Quantity', float, int]) -> bool:
return Quantity._bool_operation(self, other, operator.eq)
def __ne__(self, other: Union['Quantity', float, int]) -> bool:
return Quantity._bool_operation(self, other, operator.ne)
    def __neg__(self) -> float:
return operator.neg(self.size)
def __str__(self):
s = "{0:." + str(self.instrument.precision) + "f}" + " {1}"
s = s.format(self.size, self.instrument.symbol)
return s
def __repr__(self):
return str(self)
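# Illustrative sketch (not part of the original module): how the operator
# plumbing above behaves.  "_StubInstrument" is a made-up stand-in carrying only
# the attributes Quantity touches here (precision, symbol); real code would use
# tensortrade's Instrument class.
def _example_quantity_arithmetic():
    class _StubInstrument:
        precision = 2
        symbol = "BTC"

    btc = _StubInstrument()
    q1 = Quantity(btc, 1.50)
    q2 = Quantity(btc, 0.25)
    total = q1 + q2        # dispatches to _math_operation with operator.add
    return str(total)      # -> "1.75 BTC"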
| 33.210145
| 99
| 0.642156
|
9aa9e5d154f39154c46c1ffab0783b630e9d65b8
| 2,198
|
py
|
Python
|
K_Mathematical_Modeling/Section 4/solutionQuestion2.py
|
oercompbiomed/CBM101
|
20010dcb99fbf218c4789eb5918dcff8ceb94898
|
[
"MIT"
] | 7
|
2019-07-03T07:41:55.000Z
|
2022-02-06T20:25:37.000Z
|
K_Mathematical_Modeling/Section 4/solutionQuestion2.py
|
oercompbiomed/CBM101
|
20010dcb99fbf218c4789eb5918dcff8ceb94898
|
[
"MIT"
] | 9
|
2019-03-14T15:15:09.000Z
|
2019-08-01T14:18:21.000Z
|
K_Mathematical_Modeling/Section 4/solutionQuestion2.py
|
oercompbiomed/CBM101
|
20010dcb99fbf218c4789eb5918dcff8ceb94898
|
[
"MIT"
] | 11
|
2019-03-12T10:43:11.000Z
|
2021-10-05T12:15:00.000Z
|
from IPython.display import display, Latex
print("If the rate of mRNA production reaches a plateau, we have reached a steady state where k_elong*Pe (the rate of mRNA production) does not depend on time anymore. ")
print(" Using the steady state solution of the model, we can see that the two variables Y that we measure, i.e. Y=dmRNA/dt and the number of nucleoplasmic polymerases Y=Pc both depend on all model parameters. As such, for both these variables, dY/dk_i is not 0 so they are sensitive to these model parameters.")
print(" However, for instance if k_p is multiplied by 3, ans simultaneously k_q is also multiplied by 3, both dmRNA/dt and P_c don't change: hence, those measurable quantities do not depend independently on the different parameters (in fact, they just depend on a huge function of all parameters): in this case, the parameters are also non-iodentifiable, individually. But we can remark that they are related by:")
display(Latex('$ dmRNA/dt=Pc*[(k_{elong}*k_{on})/(k_{off}+k_{elong})]$'))
print(" Hence, combining these two measurements yields the ratio")
display(Latex('$ [(k_{elong}*k_{on})/(k_{off}+k_{elong})]$'))
print(" Individually, the parameters are non-identifiable, but collectively they are constrained by the measurements. If k_on and k_off are known from previous studies in the same context, k_elong becomes identifiable")
print("In the regime where k_elong is very fast compare to the on and off dynamics (which is not realistic - just to make the point), the later ratio becomes approximately k_on. In this regime of very fast transcription, measuring the ratio of the rate of mRNA production to the mobile, nucleoplasmic pool of polymerase complexes is a direct readout for k_on, that becomes identifiable. This is an important notion to have in mind: with a given set of available data, and a given model for interpretation, the identifiability of certain parameters from the data might depend on the parameters themselves. Hence, a first estimation of the parameters with a global fitting might indicate in which parameter regime the model has to be considered, and then the identifiability of all parameters has to be studied in this regime.")
| 244.222222
| 826
| 0.784349
|
dd3ac601c8505493aa40f0ccb0aa018f71581644
| 1,804
|
py
|
Python
|
setup.py
|
yadage/packtivity
|
e7020f549bb0933afc1f63c399ee0926d113c23c
|
[
"MIT"
] | 1
|
2021-08-23T03:45:20.000Z
|
2021-08-23T03:45:20.000Z
|
setup.py
|
lukasheinrich/packtivity
|
e7020f549bb0933afc1f63c399ee0926d113c23c
|
[
"MIT"
] | 12
|
2019-07-23T09:13:53.000Z
|
2022-02-03T05:47:36.000Z
|
setup.py
|
lukasheinrich/packtivity
|
e7020f549bb0933afc1f63c399ee0926d113c23c
|
[
"MIT"
] | 2
|
2020-03-24T10:45:28.000Z
|
2020-05-30T05:49:34.000Z
|
import os
from setuptools import setup, find_packages
deps = [
"requests[security]",
"jsonschema",
"jsonref",
"pyyaml",
"click",
"glob2",
"jsonpointer",
"jsonpath-rw",
"jq",
"yadage-schemas",
"mock",
"checksumdir",
]
if not "READTHEDOCS" in os.environ:
deps += ["jq"]
setup(
name="packtivity",
version="0.14.24",
description="packtivity - general purpose schema + bindings for PROV activities",
url="https://github.com/yadage/packtivity",
author="Lukas Heinrich",
author_email="lukas.heinrich@cern.ch",
packages=find_packages(),
python_requires=">=3.6",
include_package_data=True,
install_requires=deps,
extras_require={"celery": ["celery", "redis"]},
entry_points={
"console_scripts": [
"packtivity-run=packtivity.cli:runcli",
"packtivity-util=packtivity.cli:utilcli",
"packtivity-validate=packtivity.cli:validatecli",
"packtivity-checkproxy=packtivity.cli:checkproxy",
],
},
dependency_links=[],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Physics",
"Operating System :: OS Independent",
],
)
| 29.57377
| 85
| 0.60255
|
674e20c95d17c717bd0c30e5dae92507482c8608
| 4,978
|
py
|
Python
|
tests/test_ants.py
|
BlueBrain/atlas-annotation
|
118af9b95518a19b64a9d8008aabed557eb0f646
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ants.py
|
BlueBrain/atlas-annotation
|
118af9b95518a19b64a9d8008aabed557eb0f646
|
[
"Apache-2.0"
] | 8
|
2021-11-02T17:23:22.000Z
|
2022-03-02T12:29:26.000Z
|
tests/test_ants.py
|
BlueBrain/atlas-annotation
|
118af9b95518a19b64a9d8008aabed557eb0f646
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021, Blue Brain Project, EPFL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nibabel
import numpy as np
import pytest
import atlannot.ants
def test_register(monkeypatch):
# 2D registration
fixed = np.random.randn(10, 20).astype(np.float32)
moving = np.random.randn(10, 20).astype(np.float32)
nii = atlannot.ants.register(fixed, moving)
assert isinstance(nii, np.ndarray)
assert nii.shape == (10, 20, 1, 1, 2)
# 2D registration - moving and fixed uint8
fixed = np.random.randint(100, size=(10, 20)).astype(np.uint8)
moving = np.random.randint(100, size=(10, 20)).astype(np.uint8)
nii = atlannot.ants.register(fixed, moving)
assert isinstance(nii, np.ndarray)
assert nii.shape == (10, 20, 1, 1, 2)
# 3D registration
fixed = np.random.randn(5, 10, 20).astype(np.float32)
moving = np.random.randn(5, 10, 20).astype(np.float32)
nii = atlannot.ants.register(fixed, moving)
assert isinstance(nii, np.ndarray)
assert nii.shape == (5, 10, 20, 1, 3)
# Different shapes
fixed = np.random.randn(10, 20).astype(np.float32)
moving = np.random.randn(30, 40).astype(np.float32)
with pytest.raises(ValueError, match="shape"):
atlannot.ants.register(fixed, moving)
# Wrong fixed image dtype
fixed = np.random.randn(10, 20).astype(np.float16)
moving = np.random.randn(10, 20).astype(np.float32)
with pytest.raises(TypeError, match="Unsupported dtype"):
atlannot.ants.register(fixed, moving)
# Wrong moving image dtype
fixed = np.random.randn(10, 20).astype(np.float32)
moving = np.random.randn(10, 20).astype(np.float16)
with pytest.raises(TypeError, match="Unsupported dtype"):
atlannot.ants.register(fixed, moving)
# Wrong affine part
def mock_nibabel_load(_):
affine = np.diag([1.0, 2.0, 3.0, 4.0])
nii = nibabel.Nifti1Image(
dataobj=np.random.randn(10, 20, 1, 1, 2),
affine=affine,
)
return nii
monkeypatch.setattr("atlannot.ants.nibabel.load", mock_nibabel_load)
fixed = np.random.randn(10, 20).astype(np.float32)
moving = np.random.randn(10, 20).astype(np.float32)
with pytest.raises(RuntimeError, match="affine"):
atlannot.ants.register(fixed, moving)
def test_transform(monkeypatch):
# 2D - float32
image = np.random.randn(10, 20).astype(np.float32)
nii_data = np.random.randn(10, 20, 1, 1, 2)
atlannot.ants.transform(image, nii_data)
# 2D - uint32
image = np.random.randint(1000, size=(10, 20)).astype(np.uint32)
nii_data = np.random.randn(10, 20, 1, 1, 2)
atlannot.ants.transform(image, nii_data)
# 2D - uint8
image = np.random.randint(100, size=(10, 20)).astype(np.uint8)
nii_data = np.random.randn(10, 20, 1, 1, 2)
atlannot.ants.transform(image, nii_data)
# 2D - atlas float32
atlas = np.random.randint(5, size=(10, 20)).astype(np.float32)
nii_data = np.random.randn(10, 20, 1, 1, 2)
atlannot.ants.transform(atlas, nii_data, interpolator="genericLabel")
# 2D - atlas uint32
atlas = np.random.randint(5, size=(10, 20)).astype(np.uint32)
nii_data = np.random.randn(10, 20, 1, 1, 2)
atlannot.ants.transform(atlas, nii_data, interpolator="genericLabel")
# 3D - float32
image = np.random.randn(5, 10, 20).astype(np.float32)
nii_data = np.random.randn(5, 10, 20, 1, 3)
atlannot.ants.transform(image, nii_data)
# 3D - uint32
image = np.random.randint(1000, size=(5, 10, 20)).astype(np.uint32)
nii_data = np.random.randn(5, 10, 20, 1, 3)
atlannot.ants.transform(image, nii_data)
# Wrong image dtype
image = np.random.randn(5, 10, 20).astype(np.float16)
nii_data = np.random.randn(5, 10, 20, 3)
with pytest.raises(TypeError, match="Unsupported dtype"):
atlannot.ants.transform(image, nii_data)
# Error during transform
def mock_apply_transforms(*_1, **_2):
return 1
monkeypatch.setattr("atlannot.ants.ants.apply_transforms", mock_apply_transforms)
image = np.random.randn(5, 10, 20).astype(np.float32)
nii_data = np.random.randn(5, 1, 1, 1, 3)
with pytest.raises(RuntimeError):
atlannot.ants.transform(image, nii_data)
def test_stack_2d_transforms():
nii_data_array = [np.random.randn(10, 20, 1, 1, 2) for _ in range(5)]
nii_data_3d = atlannot.ants.stack_2d_transforms(nii_data_array)
assert nii_data_3d.shape == (5, 10, 20, 1, 3)
| 36.874074
| 85
| 0.675372
|
e5dd5684990fe6db3c1d73c5eee7e9926e8e9351
| 3,655
|
py
|
Python
|
python/util/evaluate.py
|
debajyotidatta/multiNLI_mod
|
d94e30ddd628a2df65859424ebec7d212d3227b5
|
[
"Apache-2.0"
] | null | null | null |
python/util/evaluate.py
|
debajyotidatta/multiNLI_mod
|
d94e30ddd628a2df65859424ebec7d212d3227b5
|
[
"Apache-2.0"
] | null | null | null |
python/util/evaluate.py
|
debajyotidatta/multiNLI_mod
|
d94e30ddd628a2df65859424ebec7d212d3227b5
|
[
"Apache-2.0"
] | 1
|
2018-09-06T14:00:47.000Z
|
2018-09-06T14:00:47.000Z
|
import csv
import sys
def evaluate_classifier(classifier, eval_set, batch_size):
"""
Function to get accuracy and cost of the model, evaluated on a chosen dataset.
    classifier: the model's classifier; it should return genres, predicted labels, and cost for a given minibatch of the evaluation dataset
eval_set: the chosen evaluation set, for eg. the dev-set
batch_size: the size of minibatches.
"""
correct = 0
genres, hypotheses, cost = classifier(eval_set)
cost = cost / batch_size
full_batch = int(len(eval_set) / batch_size) * batch_size
for i in range(full_batch):
hypothesis = hypotheses[i]
if hypothesis == eval_set[i]['label']:
correct += 1
return correct / float(len(eval_set)), cost
def evaluate_classifier_genre(classifier, eval_set, batch_size):
"""
Function to get accuracy and cost of the model by genre, evaluated on a chosen dataset. It returns a dictionary of accuracies by genre and cost for the full evaluation dataset.
    classifier: the model's classifier; it should return genres, predicted labels, and cost for a given minibatch of the evaluation dataset
eval_set: the chosen evaluation set, for eg. the dev-set
batch_size: the size of minibatches.
"""
genres, hypotheses, cost = classifier(eval_set)
correct = dict((genre,0) for genre in set(genres))
count = dict((genre,0) for genre in set(genres))
cost = cost / batch_size
full_batch = int(len(eval_set) / batch_size) * batch_size
for i in range(full_batch):
hypothesis = hypotheses[i]
genre = genres[i]
if hypothesis == eval_set[i]['label']:
correct[genre] += 1.
count[genre] += 1.
if genre != eval_set[i]['genre']:
            print("Warning: genre mismatch for example %d" % i)
accuracy = {k: correct[k]/count[k] for k in correct}
return accuracy, cost
def evaluate_final(restore, classifier, eval_sets, batch_size):
"""
Function to get percentage accuracy of the model, evaluated on a set of chosen datasets.
restore: a function to restore a stored checkpoint
    classifier: the model's classifier; it should return genres, predicted labels, and cost for a given minibatch of the evaluation dataset
eval_set: the chosen evaluation set, for eg. the dev-set
batch_size: the size of minibatches.
"""
restore(best=True)
percentages = []
for eval_set in eval_sets:
genres, hypotheses, cost = classifier(eval_set)
correct = 0
cost = cost / batch_size
full_batch = int(len(eval_set) / batch_size) * batch_size
for i in range(full_batch):
hypothesis = hypotheses[i]
if hypothesis == eval_set[i]['label']:
correct += 1
percentages.append(correct / float(len(eval_set)))
return percentages
def predictions_kaggle(classifier, eval_set, batch_size, name):
"""
Get comma-separated CSV of predictions.
Output file has two columns: pairID, prediction
"""
INVERSE_MAP = {
0: "entailment",
1: "neutral",
2: "contradiction"
}
hypotheses = classifier(eval_set)
predictions = []
for i in range(len(eval_set)):
hypothesis = hypotheses[i]
prediction = INVERSE_MAP[hypothesis]
pairID = eval_set[i]["pairID"]
predictions.append((pairID, prediction))
#predictions = sorted(predictions, key=lambda x: int(x[0]))
f = open( name + '_predictions.csv', 'wb')
w = csv.writer(f, delimiter = ',')
w.writerow(['pairID','gold_label'])
for example in predictions:
w.writerow(example)
f.close()
| 35.144231
| 180
| 0.65554
|
e5e98173f71b8a669bf070e2eacecc08563ae35c
| 3,100
|
py
|
Python
|
authors/apps/authentication/tests_/test_model.py
|
MuhweziDeo/Ah-backend-xmen
|
60c830977fa39a7eea9ab978a9ba0c3beb0c4d88
|
[
"BSD-3-Clause"
] | 4
|
2019-01-07T09:15:17.000Z
|
2020-11-09T09:58:54.000Z
|
authors/apps/authentication/tests_/test_model.py
|
MuhweziDeo/Ah-backend-xmen
|
60c830977fa39a7eea9ab978a9ba0c3beb0c4d88
|
[
"BSD-3-Clause"
] | 34
|
2019-01-07T15:30:14.000Z
|
2019-03-06T08:23:34.000Z
|
authors/apps/authentication/tests_/test_model.py
|
MuhweziDeo/Ah-backend-xmen
|
60c830977fa39a7eea9ab978a9ba0c3beb0c4d88
|
[
"BSD-3-Clause"
] | 10
|
2018-12-18T14:43:52.000Z
|
2020-02-07T08:27:50.000Z
|
from authors.apps.authentication.models import User, UserManager
from django.test import TestCase
class TestModel(TestCase):
def test_create_super_user_with_no_password_fail(self):
"""
Test raise error if a superuser is created without a password
Returns:
A message that password is required
Raises:
TypeError if password is not provided
"""
with self.assertRaises(TypeError):
user = User.objects.create_superuser(username='superadmin',
password=None, email='supper@admin.com')
def test_create_super_user(self):
"""
Test successful creation of a superuser
Returns:
            A superuser should have is_staff set to True
"""
user = User.objects.create_superuser(username='superadmin',
password='superadminpassword', email='supper@admin.com')
self.assertEqual(user.is_staff, True)
def test_create_user_with_no_email(self):
"""
Test raise error if a user is created without an email
Returns:
A message that email is required
Raises:
TypeError if email is not provided
"""
with self.assertRaises(TypeError):
user = User.objects.create_user(
username="aggrey", email=None, password='randompassword')
def test_create_user_with_no_username(self):
"""
Test raise error if a user is created without an username
Returns:
A message that username is required
Raises:
TypeError if username is not provided
"""
with self.assertRaises(TypeError):
user = User.objects.create_user(
username=None, email='aggrey90@gmail.com', password='randompassword')
def test_return_str__method(self):
"""
Test __str__ method on user model
Returns:
            when __str__ is called it should return the user's email
"""
self.user = User.objects.create_user(
username="aggrey", email="aggrey256@gmail.com", password='randompassword')
self.assertEqual(self.user.__str__(), 'aggrey256@gmail.com')
def test_return_short_name__method(self):
"""
Test get_short_name method on user model
Returns:
            when get_short_name is called it should return the user's username
"""
self.user = User.objects.create_user(
username="aggrey", email="aggrey256@gmail.com", password='randompassword')
self.assertEqual('aggrey', self.user.get_short_name())
def test_return_full_name__method(self):
"""
Test get_full_name method on user model
Returns:
            when get_full_name is called it should return the user's username
"""
self.user = User.objects.create_user(
username="aggrey", email="aggrey256@gmail.com", password='randompassword')
self.assertEqual('aggrey', self.user.get_full_name)
| 32.978723
| 101
| 0.617097
|
16f0a10b8a8615461fff4d7b5193b1442c7c8151
| 1,186
|
py
|
Python
|
Wizard.py
|
Pokesi/RandomX-mining-presets
|
c401593adf6bedd9bd019dea728c1887ed39bfc3
|
[
"Unlicense"
] | 1
|
2021-05-11T08:38:41.000Z
|
2021-05-11T08:38:41.000Z
|
Wizard.py
|
Pokesi/RandomX-mining-presets
|
c401593adf6bedd9bd019dea728c1887ed39bfc3
|
[
"Unlicense"
] | null | null | null |
Wizard.py
|
Pokesi/RandomX-mining-presets
|
c401593adf6bedd9bd019dea728c1887ed39bfc3
|
[
"Unlicense"
] | null | null | null |
import sys, string, os
def separator():
    print("=========================================")

separator()
print(" ==> RandomX Mining Presets Wizard <== ")
separator()
print(" /===========\\ /==\\")
print(" | [-----] | | |")
print(" | | | | | | |==|")
print(" | [-----] | | | /==/ |==|")
print(" | /========/ | |/ / ")
print(" | | /=========\\ | / / /=========\\ /========\\ |==|")
print(" | | | /---\\ | | \\ \\ | /---\\ | / _____/ | |")
print(" | | | | | | | |\\ \\ | \\---/ | | /_____ | |")
print(" | | | \\---/ | | | \\ \\ | ______/ |___ / | |")
print(" |==| \\=========/ | | \\ \\ \\=========\\ \\=======/ |==|")
separator()
separator()
print("What currency do you want to mine? (full name, no spaces) >>> ")
currency = input("")
for file in os.listdir("/Users/darren/Desktop/test"):
    if file.startswith("00 " + currency):
        f = file
        os.system(f)
| 45.615385
| 84
| 0.273187
|
c383baa0146e15e9d6a93e4aab102f02371848a0
| 1,454
|
py
|
Python
|
docs/components/gallery.py
|
elben10/dash-data-table
|
d452d18c9bd43698d2b1fa0f12cd5adb43c7671c
|
[
"Apache-2.0"
] | null | null | null |
docs/components/gallery.py
|
elben10/dash-data-table
|
d452d18c9bd43698d2b1fa0f12cd5adb43c7671c
|
[
"Apache-2.0"
] | 11
|
2021-02-09T09:53:23.000Z
|
2021-02-25T10:26:31.000Z
|
docs/components/gallery.py
|
elben10/dash-data-table
|
d452d18c9bd43698d2b1fa0f12cd5adb43c7671c
|
[
"Apache-2.0"
] | null | null | null |
import dash_core_components as dcc
import dash_html_components as html
DEFAULT_IMAGE = "https://images.unsplash.com/photo-1555861496-0666c8981751?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=2550&q=80"
def gallery(items):
return html.Div([gallery_item(item) for item in items], className="row")
def gallery_item(item):
return html.Div(
html.Div(
[
html.Img(
src=item.get("img", DEFAULT_IMAGE,),
className="img-fluid card-img-top",
),
html.Div(
[
html.H5(
dcc.Link(item.get("title"), href="/", className="text-dark")
),
html.P(
item.get("description"), className="small text-muted mb-0",
),
dcc.Link(
html.P("Go to example", className="mb-0"),
className="d-flex align-items-center justify-content-center rounded-pill bg-light px-3 py-2 mt-4",
href=item.get("href"),
),
],
className="p-4",
),
],
className="bg-white rounded shadow-sm",
),
className="col-xl-3 col-lg-4 col-md-6 mb-4",
)
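# Illustrative usage sketch (not part of the original module): each gallery item
# is a plain dict; "img" is optional and falls back to DEFAULT_IMAGE.  The
# titles, descriptions, hrefs and the preview URL below are made up.
EXAMPLE_ITEMS = [
    {"title": "Basic table", "description": "A minimal example.", "href": "/basic"},
    {"title": "Styled table", "description": "Custom formatting.", "href": "/styled",
     "img": "https://example.com/preview.png"},
]
# layout = gallery(EXAMPLE_ITEMS)   # returns an html.Div row of cards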
| 36.35
| 175
| 0.466988
|
cdb8205e724212277bca2df471ee19f189888c3a
| 624
|
py
|
Python
|
build/lib/SortingAlgos/MergeSort.py
|
Geeky-star/SortingAlgorithms
|
ff79a3a7879154b4ffe0ec83a3640c5ed31799be
|
[
"MIT"
] | null | null | null |
build/lib/SortingAlgos/MergeSort.py
|
Geeky-star/SortingAlgorithms
|
ff79a3a7879154b4ffe0ec83a3640c5ed31799be
|
[
"MIT"
] | null | null | null |
build/lib/SortingAlgos/MergeSort.py
|
Geeky-star/SortingAlgorithms
|
ff79a3a7879154b4ffe0ec83a3640c5ed31799be
|
[
"MIT"
] | null | null | null |
def MergeSort(a):
if len(a)>1:
mid=len(a)//2
l=a[:mid]
r=a[mid:]
MergeSort(l)
MergeSort(r)
i=j=k=0
while i<len(l) and j<len(r):
if l[i]<r[j]:
a[k] = l[i]
i+=1
else:
a[k]=r[j]
j+=1
k+=1
while i<len(l):
a[k]=l[i]
i+=1
k+=1
while j<len(r):
a[k]=r[j]
j+=1
k+=1
return a
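# Illustrative usage (not part of the original module): MergeSort sorts the list
# in place and also returns it, e.g. MergeSort([5, 2, 9, 1]) -> [1, 2, 5, 9].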
| 17.333333
| 36
| 0.235577
|
9ca1c5ede4ed8f8b8d9eff2b5abdd52c616996d5
| 2,801
|
py
|
Python
|
tempest/api/volume/test_volumes_clone.py
|
cityofships/tempest
|
59aa6811a3664d88b8939603b8e974644fbe21fa
|
[
"Apache-2.0"
] | 254
|
2015-01-05T19:22:52.000Z
|
2022-03-29T08:14:54.000Z
|
tempest/api/volume/test_volumes_clone.py
|
cityofships/tempest
|
59aa6811a3664d88b8939603b8e974644fbe21fa
|
[
"Apache-2.0"
] | 13
|
2015-03-02T15:53:04.000Z
|
2022-02-16T02:28:14.000Z
|
tempest/api/volume/test_volumes_clone.py
|
cityofships/tempest
|
59aa6811a3664d88b8939603b8e974644fbe21fa
|
[
"Apache-2.0"
] | 367
|
2015-01-07T15:05:39.000Z
|
2022-03-04T09:50:35.000Z
|
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common import utils
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
class VolumesCloneTest(base.BaseVolumeTest):
"""Test volume clone"""
@classmethod
def skip_checks(cls):
super(VolumesCloneTest, cls).skip_checks()
if not CONF.volume_feature_enabled.clone:
raise cls.skipException("Cinder volume clones are disabled")
def _verify_volume_clone(self, source_volume, cloned_volume,
bootable='false', extra_size=0):
cloned_vol_details = self.volumes_client.show_volume(
cloned_volume['id'])['volume']
self.assertEqual(source_volume['id'],
cloned_vol_details['source_volid'])
self.assertEqual(source_volume['size'] + extra_size,
cloned_vol_details['size'])
self.assertEqual(bootable, cloned_vol_details['bootable'])
@decorators.idempotent_id('9adae371-a257-43a5-9555-dc7c88e66e0e')
def test_create_from_volume(self):
"""Test cloning a volume with increasing size"""
# Creates a volume from another volume passing a size different from
# the source volume.
src_size = CONF.volume.volume_size
extend_size = CONF.volume.volume_size_extend
src_vol = self.create_volume(size=src_size)
# Destination volume bigger than source
dst_vol = self.create_volume(source_volid=src_vol['id'],
size=src_size + extend_size)
self._verify_volume_clone(src_vol, dst_vol, extra_size=extend_size)
@decorators.idempotent_id('cbbcd7c6-5a6c-481a-97ac-ca55ab715d16')
@utils.services('image')
def test_create_from_bootable_volume(self):
"""Test cloning a bootable volume"""
# Create volume from image
img_uuid = CONF.compute.image_ref
src_vol = self.create_volume(imageRef=img_uuid)
# Create a volume from the bootable volume
cloned_vol = self.create_volume(source_volid=src_vol['id'])
self._verify_volume_clone(src_vol, cloned_vol, bootable='true')
| 38.369863
| 78
| 0.688683
|
03bcb3fc8af01843b6c87e94c846c74d46e62f65
| 1,160
|
py
|
Python
|
FaceRecognizer/utils/httpreq.py
|
r4tylmz/yolo-dlib-face-recognition
|
a7b63d3d9735215c31a441d39c925f119e5662d0
|
[
"MIT"
] | 3
|
2021-06-28T19:00:55.000Z
|
2022-01-14T19:49:58.000Z
|
FaceRecognizer/utils/httpreq.py
|
r4tylmz/yolo-dlib-face-recognition
|
a7b63d3d9735215c31a441d39c925f119e5662d0
|
[
"MIT"
] | 2
|
2022-03-10T17:46:07.000Z
|
2022-03-11T17:50:02.000Z
|
FaceRecognizer/utils/httpreq.py
|
r4tylmz/yolo-dlib-face-recognition
|
a7b63d3d9735215c31a441d39c925f119e5662d0
|
[
"MIT"
] | null | null | null |
import requests
from constants import constants
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def get_staff_credentials_by_id(id) -> str:
response = requests.get(f'{constants.BASEURL}/Staff/{id}', verify=False).json()
formatted_fullname = f"{response['name']}_{response['lastName']}_{response['id']}"
return formatted_fullname
def send_staff_credentials(name, lastname, phone):
data = {"name": name, "lastName": lastname, "phoneNumber": phone}
response = requests.post(f"{constants.BASEURL}/Staff/", json=data, verify=False)
index = response.json()["id"]
print(f"[INFO] HTTP CODE: {response.status_code} | USER ID: {index} SUCCESSFULLY CREATED")
return index
def send_staff_activity(room_id, staff_id, entry, exit):
data = {"roomId": room_id, "staffId": staff_id,
"entryTime": entry.strftime("%Y-%m-%dT%H:%M:%S.%f"),
"exitTime": exit.strftime("%Y-%m-%dT%H:%M:%S.%f")}
    response = requests.post(f'{constants.BASEURL}/StaffActivity', json=data, verify=False)
print(f"[INFO] HTTP CODE: {response.status_code} | USER ID: {staff_id} SUCCESSFULLY SENT TO API")
| 50.434783
| 101
| 0.700862
|
1c7feff5bff8e7604025a01ea5c0efdc499fee4f
| 26,220
|
py
|
Python
|
tools/analysis/base.py
|
GiacobboNicola/PeTar
|
ed40946abbe346e2b0e72ae836add7e38bf851c1
|
[
"MIT"
] | null | null | null |
tools/analysis/base.py
|
GiacobboNicola/PeTar
|
ed40946abbe346e2b0e72ae836add7e38bf851c1
|
[
"MIT"
] | null | null | null |
tools/analysis/base.py
|
GiacobboNicola/PeTar
|
ed40946abbe346e2b0e72ae836add7e38bf851c1
|
[
"MIT"
] | null | null | null |
# base class and functions
import numpy as np
class DictNpArrayMix:
""" The basic class of data structure
    Class data members are initialized from the keys provided to the initial (__init__) function
    Members can be accessed in the style of either a dictionary or a numpy.ndarray
"""
def __init__(self, keys, _dat=None, _offset=int(0), _append=False, **kwargs):
"""
Parameters
----------
keys: list of class member name and the corresponding types or numpy.ndarray shape
Class members list description. Defined by inherited types
            For example: keys=[['mass',numpy.float64],['pos',(numpy.float64,3)],['sub1',typename],['sub2',(typename,kwargs)]], will provide class members: mass (1D numpy.ndarray), pos (2D numpy.ndarray with a shape of (*,3)), sub1 (a class instance with the type of typename) and sub2 (a type based on DictNpArrayMix with additional keyword arguments, kwargs)
_dat: numpy.ndarray | type(self) | None
If it is 2D numpy.ndarray type data, read data as readArray function
If it is the same class type, copy the data
If it is None, initial class with empty data
_offset: int (0)
Reading column offset of _dat if it is 2D np.ndarray
_append: bool (False)
If true, append keys and ncols to the current class instead of create new class members
kwargs: dict ()
keyword arguments, defined by inherited types
"""
self.initargs = kwargs.copy()
if (_append): self.keys = self.keys + keys
else: self.keys = keys.copy()
if (issubclass(type(_dat), DictNpArrayMix)):
icol = int(0)
for key, parameter in keys:
if (type(parameter) == type):
if (issubclass(parameter, DictNpArrayMix)):
self.__dict__[key] = parameter(_dat.__dict__[key], **kwargs)
icol += self.__dict__[key].ncols
else:
self.__dict__[key] = _dat.__dict__[key].copy()
icol += 1
#raise ValueError('Initial fail, unknown key type, should be inherience of DictNpArrayMix, given ',parameter)
elif (type(parameter)==tuple):
if (type(parameter[0]) == type) & (type(parameter[1]) == int):
self.__dict__[key] = _dat.__dict__[key].copy()
icol += parameter[1]
elif (type(parameter[0]) == type) & (type(parameter[1])==dict):
if(issubclass(parameter[0], DictNpArrayMix)):
self.__dict__[key] = parameter[0](_dat.__dict__[key], **{**kwargs, **parameter[1]})
icol += self.__dict__[key].ncols
else:
self.__dict__[key] = _dat.__dict__[key].copy()
icol += 1
else:
raise ValueError('Initial fail, unknown key type ',parameter[0],' and column count ', parameter[1] )
else:
raise ValueError('Initial fail, unknown key parameter, should be DictNpArrayMix type name or value of int, given ',parameter)
if (_append): self.ncols += int(icol)
else: self.ncols = int(icol)
self.size = _dat.size
elif (type(_dat)==np.ndarray):
icol = _offset
self.size = int(0)
for key, parameter in keys:
if (type(parameter) == type):
if (issubclass(parameter, DictNpArrayMix)):
self.__dict__[key] = parameter(_dat, icol, False, **kwargs)
icol += self.__dict__[key].ncols
self.size += self.__dict__[key].size*self.__dict__[key].ncols
else:
self.__dict__[key] = _dat[:,icol].astype(parameter)
icol += 1
self.size += self.__dict__[key].size
#raise ValueError('Initial fail, unknown key type, should be inherience of DictNpArrayMix, given ',parameter)
elif (type(parameter)==tuple):
if (type(parameter[0]) == type) & (type(parameter[1]) == int):
self.__dict__[key] = _dat[:,icol:icol+parameter[1]].astype(parameter[0])
icol += parameter[1]
self.size += self.__dict__[key].size
elif (type(parameter[0]) == type) & (type(parameter[1])==dict):
if(issubclass(parameter[0], DictNpArrayMix)):
self.__dict__[key] = parameter[0](_dat, icol, False, **{**kwargs, **parameter[1]})
icol += self.__dict__[key].ncols
self.size += self.__dict__[key].size*self.__dict__[key].ncols
else:
self.__dict__[key] = _dat[:,icol].astype(parameter[0])
icol += 1
self.size += self.__dict__[key].size
else:
raise ValueError('Initial fail, unknown key type ',parameter[0],' and column count ', parameter[1] )
else:
raise ValueError('Initial fail, unknown key parameter, should be DictNpArrayMix type name or value of int, given ',parameter)
icol -= _offset
if (_append): self.ncols += int(icol)
else: self.ncols = int(icol)
self.size = int(self.size/icol)
if (self.size != _dat.shape[0]):
raise ValueError('Reading error, final counted size ',self.size,' is not consistent with reading ndarray shape',_dat.shape[0])
        elif (_dat is None):
icol = int(0)
for key, parameter in keys:
if (type(parameter) == type):
if (issubclass(parameter, DictNpArrayMix)):
self.__dict__[key] = parameter(**kwargs)
icol += self.__dict__[key].ncols
else:
self.__dict__[key] = np.empty(0).astype(parameter)
icol += 1
#raise ValueError('Initial fail, unknown key type, should be inherience of DictNpArrayMix, given b',parameter)
elif (type(parameter)==tuple):
if (type(parameter[0]) == type) & (type(parameter[1]) == int):
self.__dict__[key] = np.empty([0,parameter[1]]).astype(parameter[0])
icol += parameter[1]
elif (type(parameter[0]) == type) & (type(parameter[1])==dict):
if(issubclass(parameter[0], DictNpArrayMix)):
self.__dict__[key] = parameter[0](**{**kwargs, **parameter[1]})
icol += self.__dict__[key].ncols
else:
self.__dict__[key] = np.empty(0).astype(parameter[0])
icol += 1
else:
raise ValueError('Initial fail, unknown key type ',parameter[0],' and column count ', parameter[1] )
else:
raise ValueError('Initial fail, unknown key parameter, should be DictNpArrayMix type name or value of int, given ',parameter)
if (_append): self.ncols += int(icol)
else: self.ncols = int(icol)
self.size = int(0)
else:
            raise ValueError('Initial fail, data type should be ',type(self),' or np.ndarray, given ',type(_dat))
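    # Illustrative sketch (not part of the original module): a hypothetical
    # subclass showing how "keys" drives member creation.
    #     class Particle(DictNpArrayMix):
    #         def __init__(self, _dat=None, _offset=0, _append=False, **kwargs):
    #             keys = [['mass', np.float64], ['pos', (np.float64, 3)]]
    #             DictNpArrayMix.__init__(self, keys, _dat, _offset, _append, **kwargs)
    # Reading a 2D array with N rows and 4 columns then fills "mass" from
    # column 0 (a 1D array) and "pos" from columns 1-3 (an (N, 3) array).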
def readArray(self, _dat, _offset=int(0),**kwargs):
""" Read class member data from a 2D numpy.ndarray
Parameters
----------
_dat: numpy.ndarray
            2D array to read; rows are events, columns are members. The class members are filled in the order of the items in keys provided in the initial function.
            For example: if keys are [['mass',1],['pos',3]], the member mass = _dat[:,_offset] and pos = _dat[:,_offset+1:_offset+4]
        _offset: int (0)
            Reading column offset of _dat if it is a 2D np.ndarray
        kwargs: dict ()
keyword arguments
"""
icol = _offset
self.size = int(0)
for key, parameter in self.keys:
if (type(parameter) == type):
if (issubclass(parameter, DictNpArrayMix)):
self.__dict__[key].readArray(_dat, icol, **kwargs)
icol += self.__dict__[key].ncols
self.size += self.__dict__[key].size*self.__dict__[key].ncols
else:
self.__dict__[key] = _dat[:,icol].astype(parameter)
icol += 1
self.size += self.__dict__[key].size
                    #raise ValueError('Initial fail, unknown key type, should be inheritance of DictNpArrayMix, given ',parameter)
elif (type(parameter)==tuple):
if (type(parameter[0]) == type) & (type(parameter[1]) == int):
self.__dict__[key] = _dat[:,icol:icol+parameter[1]].astype(parameter[0])
icol += parameter[1]
self.size += self.__dict__[key].size
elif (type(parameter[0]) == type) & (type(parameter[1])==dict):
if(issubclass(parameter[0], DictNpArrayMix)):
self.__dict__[key].readArray(_dat, icol, **kwargs,**parameter[1])
icol += self.__dict__[key].ncols
self.size += self.__dict__[key].size*self.__dict__[key].ncols
else:
self.__dict__[key] = _dat[:,icol].astype(parameter[0])
icol += 1
self.size += self.__dict__[key].size
else:
raise ValueError('Initial fail, unknown key type ',parameter[0],' and column count ', parameter[1] )
else:
raise ValueError('Initial fail, unknown key parameter, should be DictNpArrayMix type name or value of int, given ',parameter)
icol -= _offset
self.size = int(self.size/icol)
if (self.size != _dat.shape[0]):
raise ValueError('Reading error, final counted size ',self.size,' is not consistent with reading ndarray shape',_dat.shape[0])
if (self.ncols != icol):
            raise ValueError('Column number inconsistency, self ncols ',self.ncols,' key ncols ', icol)
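    # Usage sketch (illustrative, not part of the original class): for a
    # hypothetical subclass whose keys are
    #     [['mass', np.float64], ['pos', (np.float64, 3)]]
    # calling readArray(_dat) with an (N, 4) array fills mass from column
    # _offset and pos from columns _offset+1 .. _offset+3, and sets
    # size = N and ncols = 4.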
def __getitem__(self, k):
""" Map getitem to all members generated from the keys in the initial function, and return a new data filtered by k
If the member is an inherited type of DictNpArrayMix, also map all sub-members if it.
Parameters
----------
k: filter
The same type of arguments for numpy.ndarray.__getitem__
"""
if (type(k)==str):
return self.__dict__[k]
else:
cls_type = type(self)
new_dat = cls_type(**self.initargs)
new_dat.ncols = self.ncols
new_dat.size = int(0)
new_dat.keys = self.keys.copy()
icol = int(0)
for key_type in new_dat.keys:
key = key_type[0]
item = self.__dict__[key]
if (type(item) == np.ndarray):
if item.shape[0]!=self.size:
raise ValueError('Member ',key,' size/dimension',item.shape, ' is not consistent with the data size',self.size)
new_dat.__dict__[key] = item[k]
new_dat.size += new_dat.__dict__[key].size
if (len(item.shape)>1): icol += item.shape[1]
else: icol += 1
elif (issubclass(type(item), DictNpArrayMix)):
new_item = item[k]
new_dat.__dict__[key] = new_item
new_dat.size += new_item.size*new_item.ncols
icol += new_item.ncols
new_dat.size = int(new_dat.size/new_dat.ncols)
if (icol != new_dat.ncols):
                raise ValueError('Column number inconsistent, counted:',icol,' saved ncols:',new_dat.ncols,'keys:',new_dat.keys,'filter: ',k,' original size:',self.size,' original ncols:',self.ncols)
return new_dat
def __setitem__(self, k, data):
""" Map setitem to all members generated from the keys in the initial function
            If a member is an inherited type of DictNpArrayMix, also map all of its sub-members.
Parameters
----------
k: filter
The same type of arguments for numpy.ndarray.__getitem__
        data: numpy.ndarray | DictNpArrayMix
The new data to set
"""
if (type(k)==str):
self.__dict__[k] = data
else:
for key_type in self.keys:
key = key_type[0]
self.__dict__[key][k] = data[key]
# def keys(self):
# return self.__dict__.keys()
def addNewMember(self, key, member):
""" Add a new class member
            The ncols attribute is updated as well.
            Be careful: if the target for adding the member is a sub-member, the ncols of its parent is not updated.
            This can cause problems for the parent when the data size needs to be calculated.
            Thus, after calling this function, also increase the ncols of the parents for consistency.
Parameters
----------
key: string
new member name
        member: numpy.ndarray | DictNpArrayMix
            data bound to the member; it should have the same size as the existing members of the class
"""
new_key_flag=False
if (key in self.__dict__.keys()):
member_old = self.__dict__[key]
dimension = int(1)
if (type(member_old)==np.ndarray):
if len(member_old.shape)>1:
dimension = member_old.shape[1]
elif (issubclass(type(member_old), DictNpArrayMix)):
dimension = member_old.ncols
self.ncols -= dimension
            self.__dict__[key] = member  # replace the existing member with the new data
else:
self.__dict__[key] = member
new_key_flag=True
dimension = int(1)
if (type(member)==np.ndarray):
if len(member.shape)>1:
dimension = member.shape[1]
if(new_key_flag): self.keys.append([key,(type(member[:,0]),dimension)])
else:
if(new_key_flag): self.keys.append([key,type(member)])
elif (issubclass(type(member), DictNpArrayMix)):
dimension = member.ncols
if(new_key_flag): self.keys.append([key,type(member)])
else:
raise ValueError('New member type should be np.ndarray or DictNpArrayMix, but given ',type(member))
self.ncols += dimension
if (self.size != int(member.size/dimension)):
raise ValueError('New member has different size: ',member.size/dimension, ' host size: ',self.size)
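    # Usage sketch (illustrative, hypothetical names): for an instance 'dat'
    # with dat.size == N,
    #     dat.addNewMember('flag', np.zeros(N))
    # adds a one-column member and increases dat.ncols by 1; as noted in the
    # docstring, the ncols of any parent object must be updated by hand.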
def getherDataToArray(self):
""" gether all data to a 2D numpy.ndarray and return it
An inverse function to readArray
"""
dat_out=np.zeros([self.size,self.ncols])
icol = int(0)
for key_type in self.keys:
key = key_type[0]
member = self.__dict__[key]
if (type(member)==np.ndarray):
if len(member.shape)>1:
dimension= member.shape[1]
if (dat_out.shape[0]!=member.shape[0]):
raise ValueError('Member ',key,' size,dimension ',member.shape,' is not consistent with data output size',dat_out.shape[0])
for k in range(dimension):
dat_out[:,icol] = member[:,k]
icol += 1
else:
dat_out[:,icol] = member
icol += 1
elif (issubclass(type(member), DictNpArrayMix)):
ncols = member.ncols
dat_out[:,icol:icol+ncols] = member.getherDataToArray()
icol += ncols
return dat_out
def append(self, *_dat):
""" Map the numpy.append function to each member
Append the numpy.ndarray of each member in a group of input data to the corresponding member in self
        Parameters
        ----------
        *_dat: inherited DictNpArrayMix
            The data should contain all members existing in self
"""
#for idat in _dat:
# if (type(idat) != type(self)):
        #        raise ValueError('Initial fail, data type not consistent, type [0] is ',type(self),' given ',type(idat))
data_with_self = [self]+list(_dat)
for key, item in self.__dict__.items():
if (type(item) == np.ndarray):
if (len(item.shape)!=len(_dat[0][key].shape)):
raise ValueError('Appending data member ',key,' has shape',_dat[0][key].shape,' but self data has shape',item.shape)
self.__dict__[key] = np.concatenate(tuple(map(lambda x:x.__dict__[key], data_with_self)))
elif(issubclass(type(item), DictNpArrayMix)):
self.__dict__[key].append(*tuple(map(lambda x:x.__dict__[key], _dat)))
self.size += np.sum(tuple(map(lambda x:x.size, _dat)))
def savetxt(self, fname, **kwargs):
""" Save class member data to a file
Use the getherDataToArray and then numpy.savetxt
Parameters
----------
fname: string
name of the output file
kwargs: dict
keyword arguments for numpy.savetxt
"""
dat_out= self.getherDataToArray()
np.savetxt(fname, dat_out, **kwargs)
def loadtxt(self, fname, **kwargs):
""" Load class member data from a file
Use numpy.loadtxt to read data and then use readArray
Parameters
----------
fname: string
name of the input file
kwargs: dict
keyword arguments for numpy.loadtxt
"""
dat_int = np.loadtxt(fname, ndmin=2, **kwargs)
self.readArray(dat_int, **kwargs)
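    # Round-trip sketch (illustrative, hypothetical file name): savetxt and
    # loadtxt are inverses through getherDataToArray/readArray, e.g.
    #     dat.savetxt('data.txt')
    #     dat2 = type(dat)(**dat.initargs)
    #     dat2.loadtxt('data.txt')
    # should reproduce the same member arrays up to the text precision.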
def collectDtype(self):
""" Collect dtype from keys iteratively for reading BINARY format
            For members of type DictNpArrayMix, the column names use the member name + '.' as a prefix
"""
dt=[]
for key, parameter in self.keys:
if (type(parameter) == type):
if (issubclass(parameter, DictNpArrayMix)):
dt_sub = self.__dict__[key].collectDtype()
for item in dt_sub:
dt.append((key+'.'+item[0], item[1]))
else:
dt.append((key, parameter))
elif (type(parameter) == tuple):
if (type(parameter[0]) == type) & (type(parameter[1]) == int):
dt.append((key, parameter))
elif (type(parameter[0]) == type) & (type(parameter[1])==dict):
if(issubclass(parameter[0], DictNpArrayMix)):
dt_sub = self.__dict__[key].collectDtype()
for item in dt_sub:
dt.append((key+'.'+item[0], item[1]))
else:
dt.append((key, parameter[0]))
else:
raise ValueError('Initial fail, unknown key type ',parameter[0],' and column count ', parameter[1] )
else:
raise ValueError('Initial fail, unknown key parameter, should be DictNpArrayMix type name or value of int, given ',parameter)
return dt
def readArrayWithName(self, _dat, _prefix='', **kwargs):
""" Read class member data from a numpy.ndarray with names
Parameters
----------
_dat: numpy.ndarray
Read array with names of columns (key/member name of class)
_prefix: string ('')
            The prefix added in front of the column names to read. This is used when the current class instance is a sub-member (the member name is consistent with the prefix)
        kwargs: dict ()
keyword arguments
"""
icol = int(0)
self.size = int(0)
for key, parameter in self.keys:
if (type(parameter) == type):
if (issubclass(parameter, DictNpArrayMix)):
self.__dict__[key].readArrayWithName(_dat, _prefix+key+'.', **kwargs)
icol += self.__dict__[key].ncols
self.size += self.__dict__[key].size*self.__dict__[key].ncols
else:
self.__dict__[key] = _dat[_prefix+key]
icol += 1
self.size += self.__dict__[key].size
elif (type(parameter) == tuple):
if (type(parameter[0]) == type) & (type(parameter[1]) == int):
self.__dict__[key] = _dat[_prefix+key]
icol += parameter[1]
self.size += self.__dict__[key].size
elif (type(parameter[0]) == type) & (type(parameter[1])==dict):
if(issubclass(parameter[0], DictNpArrayMix)):
self.__dict__[key].readArrayWithName(_dat, _prefix+key+'.', **kwargs,**parameter[1])
icol += self.__dict__[key].ncols
self.size += self.__dict__[key].size*self.__dict__[key].ncols
else:
self.__dict__[key] = _dat[_prefix+key]
icol += 1
self.size += self.__dict__[key].size
else:
raise ValueError('Initial fail, unknown key type ',parameter[0],' and column count ', parameter[1] )
else:
raise ValueError('Initial fail, unknown key parameter, should be DictNpArrayMix type name or value of int, given ',parameter)
self.size = int(self.size/icol)
if (self.size != _dat.size):
raise ValueError('Reading error, final counted size ',self.size,' is not consistent with reading ndarray shape',_dat.size)
if (self.ncols != icol):
            raise ValueError('Column number inconsistency, self ncols ',self.ncols,' key ncols ', icol)
def fromfile(self, fname, **kwargs):
""" Load clas member data from a file using BINARY format
Use numpy.fromfile to read data, the dtype is defined by keys (members)
Notice if the first line is header, offset counts in byte should be used
Parameters
----------
fname: string
name of the input file
kwargs: dict
            keyword arguments for numpy.fromfile; notice that dtype is already defined, do not provide it
"""
dt = self.collectDtype()
dat_int = np.fromfile(fname, dtype=dt, **kwargs)
self.readArrayWithName(dat_int, '', **kwargs)
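    # Usage sketch (illustrative, hypothetical file name; assumes numpy>=1.17
    # for the 'offset' keyword of numpy.fromfile): for a binary snapshot whose
    # first line is an ASCII header,
    #     header_bytes = len(open('data.bin', 'rb').readline())
    #     dat.fromfile('data.bin', offset=header_bytes)
    # skips the header and reads the remaining records with the dtype built
    # by collectDtype().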
def printSize(self):
""" print size of each member
            Print the shape of each member, used to test whether the members have consistent sizes
"""
for key_type in self.keys:
key = key_type[0]
member = self.__dict__[key]
if (type(member)==np.ndarray):
print(key,member.shape)
elif (issubclass(type(member), DictNpArrayMix)):
member.printSize()
def join(*_dat):
""" Join multiple data to one
        For a list of data with the same inherited DictNpArrayMix type, this function joins all of the data into one
        Parameters
        ----------
        *_dat: inherited DictNpArrayMix
a group of data to join
return: new joined data
"""
type0 = type(_dat[0])
for idat in _dat:
if (type(idat) != type0):
                raise ValueError('Initial fail, data type not consistent, type [0] is ',type0,' given ',type(idat))
new_dat = type0(**_dat[0].initargs)
for key, item in _dat[0].__dict__.items():
if (type(item) == np.ndarray):
new_dat.__dict__[key] = np.concatenate(tuple(map(lambda x:x.__dict__[key], _dat)))
elif(issubclass(type(item), DictNpArrayMix)):
new_dat.__dict__[key] = join(*tuple(map(lambda x:x.__dict__[key], _dat)))
else:
new_dat.__dict__[key] = _dat[0].__dict__[key]
new_dat.size = np.sum(tuple(map(lambda x:x.size, _dat)))
return new_dat
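    # Usage sketch (illustrative): for two instances a and b of the same
    # inherited type,
    #     c = join(a, b)
    # returns a new instance with c.size == a.size + b.size, concatenating
    # every ndarray member and recursively joining DictNpArrayMix members.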
# vector dot of x, y
vecDot = lambda x,y: np.sum(x*y,axis=1)
def cantorPairing(id1, id2):
""" Use CantorPairing to map two components id to one binary id
Parameters
----------
id1: 1D numpy.ndarray or int
ID for component 1
id2: 1D numpy.ndarray or int
ID for component 2
return: binary id
"""
i1=np.minimum(id1,id2).astype('int64')
i2=np.maximum(id1,id2).astype('int64')
return ((i1+i2+1)*(i1+i2)/2+i2).astype('int64')
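# Worked example (illustrative): cantorPairing is symmetric because the two IDs
# are sorted before combining, e.g.
#     cantorPairing(1, 2) == cantorPairing(2, 1) == 8
# since ((1+2+1)*(1+2))//2 + 2 = 8.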
def calcTrh(N, rh, m, G, gamma=0.02):
""" Calculate Spitzer one-component half-mass relaxation time
Trh = 0.138 N^0.5 Rh^1.5 /( G^0.5 m^0.5 ln(gamma N))
Parameters
----------
N: 1D numpy.ndarray or int
Total number of particles
rh: 1D numpy.ndarray or float
Half-mass radius
m: 1D numpy.ndarray or float
mass of one particle
G: float
Gravitational constant
gamma: float (0.02 # Giersz M., Heggie D. C., 1996, MNRAS, 279, 1037)
The coefficient for Coulomb logarithm
return: half-mass relaxation time
"""
return 0.138*N**0.5*rh**1.5/(m**0.5*np.log(gamma*N)*G**0.5)
def calcTcr(M, rh, G):
""" Calculate half-mass crossing time
Tcr = Rh^1.5/sqrt(G M)
Parameters
----------
M: 1D numpy.ndarray or float
total mass of the system
rh: 1D numpy.ndarray or float
Half-mass radius
G: float
Gravitational constant
return: half-mass crossing time
"""
return rh**1.5/np.sqrt(G*M)
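# Illustrative check (not part of the original module; assumes numpy has been
# imported as np above, as used throughout this file): compare the two time
# scales for an assumed equal-mass system with N=1e5, m=1, rh=1 and G=1.
if __name__ == '__main__':
    _N, _m, _rh, _G = 1.0e5, 1.0, 1.0, 1.0
    print('Trh =', calcTrh(_N, _rh, _m, _G))   # half-mass relaxation time
    print('Tcr =', calcTcr(_N * _m, _rh, _G))  # half-mass crossing time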
| 48.021978
| 360
| 0.544966
|
882761e93b9325efca3dd198a5edcd315841e87f
| 480
|
py
|
Python
|
claudius_dev/constant.py
|
zmoitier/accoster
|
648b9edf7e73848eacb60af0885be4d30fdbbafc
|
[
"MIT"
] | null | null | null |
claudius_dev/constant.py
|
zmoitier/accoster
|
648b9edf7e73848eacb60af0885be4d30fdbbafc
|
[
"MIT"
] | 5
|
2020-12-04T21:17:00.000Z
|
2020-12-06T19:54:36.000Z
|
claudius_dev/constant.py
|
zmoitier/accoster
|
648b9edf7e73848eacb60af0885be4d30fdbbafc
|
[
"MIT"
] | 1
|
2020-11-18T17:24:52.000Z
|
2020-11-18T17:24:52.000Z
|
""" Constant class """
from dataclasses import dataclass
from typing import Union
import numpy as np
@dataclass(frozen=True)
class Constant:
"""
Dataclass to describe the constant function _ ↦ value.
Attributes
----------
value : Union[int, float, complex]
Scalar value
"""
value: Union[int, float, complex]
def __call__(self, r):
return np.full_like(r, self.value)
def __repr__(self):
return f"_ ↦ {self.value}"
| 18.461538
| 58
| 0.627083
|
caf7109a216b415b4600a19f6e38c0eecc9526de
| 505
|
py
|
Python
|
stackimpact/utils.py
|
timgates42/stackimpact-python
|
4d0a415b790c89e7bee1d70216f948b7fec11540
|
[
"BSD-3-Clause"
] | 742
|
2017-06-26T13:16:34.000Z
|
2022-02-06T11:05:31.000Z
|
stackimpact/utils.py
|
gaecom/stackimpact-python
|
4d0a415b790c89e7bee1d70216f948b7fec11540
|
[
"BSD-3-Clause"
] | 7
|
2017-06-28T06:01:04.000Z
|
2021-05-18T20:06:53.000Z
|
stackimpact/utils.py
|
gaecom/stackimpact-python
|
4d0a415b790c89e7bee1d70216f948b7fec11540
|
[
"BSD-3-Clause"
] | 30
|
2017-06-27T15:26:04.000Z
|
2021-05-16T11:08:53.000Z
|
import time
import uuid
import base64
import hashlib
def millis():
return int(round(time.time() * 1000))
def timestamp():
return int(time.time())
def base64_encode(s):
return base64.b64encode(s.encode('utf-8')).decode('utf-8')
def base64_decode(b):
return base64.b64decode(b).decode('utf-8')
def generate_uuid():
return str(uuid.uuid4())
def generate_sha1(text):
sha1_hash = hashlib.sha1()
sha1_hash.update(text.encode('utf-8'))
return sha1_hash.hexdigest()
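# Usage sketch (illustrative): the helpers round-trip and hash plain strings,
# e.g. base64_decode(base64_encode('agent')) == 'agent', and
# generate_sha1('agent') returns the SHA-1 hex digest of the UTF-8 bytes.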
| 14.852941
| 62
| 0.681188
|
28a6e8b53e3208b8fd03a53a0baf57c9ca7e7fa7
| 1,937
|
py
|
Python
|
docs/conf.py
|
Nasko29/progimage-python
|
4ae6de092e1f60b5612b43eb8c36f82e0d466c67
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Nasko29/progimage-python
|
4ae6de092e1f60b5612b43eb8c36f82e0d466c67
|
[
"MIT"
] | 5
|
2021-03-18T23:34:32.000Z
|
2022-03-11T23:44:11.000Z
|
docs/conf.py
|
Nasko29/progimage-python
|
4ae6de092e1f60b5612b43eb8c36f82e0d466c67
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'progimage'
copyright = '2019, Nasko Grozdanov'
author = 'Nasko Grozdanov'
# The full version, including alpha/beta/rc tags
release = '0.0.1'
master_doc = 'index'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
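# Typical build invocation for this configuration (assumed project layout with
# the sources next to this conf.py in the docs/ directory):
#     sphinx-build -b html docs docs/_build/html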
| 32.830508
| 79
| 0.661332
|
cbe9fd394bb868044dc5245f6b049c44aeda5a38
| 133
|
py
|
Python
|
example_in.py
|
roberhag/variables-to-argparse
|
7097679b14c5ec2eeca19e8ca9ecfa1c8dd27117
|
[
"Unlicense"
] | 1
|
2022-03-24T11:40:25.000Z
|
2022-03-24T11:40:25.000Z
|
example_in.py
|
roberhag/variables-to-argparse
|
7097679b14c5ec2eeca19e8ca9ecfa1c8dd27117
|
[
"Unlicense"
] | null | null | null |
example_in.py
|
roberhag/variables-to-argparse
|
7097679b14c5ec2eeca19e8ca9ecfa1c8dd27117
|
[
"Unlicense"
] | null | null | null |
# Here follows example input:
fs = 48000 # Sample rate (Hz)
base_freq = 100. # Base frequency (Hz)
bpm = 120 # Beats per minute
| 19
| 39
| 0.669173
|
9a5dd921fdab0f95903b7c7022138b979b51c0db
| 1,243
|
py
|
Python
|
test/proj4/proj-regression-EPSG-3857-3.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 7
|
2019-03-19T09:32:41.000Z
|
2022-02-07T13:20:33.000Z
|
test/proj4/proj-regression-EPSG-3857-3.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 2
|
2021-03-30T05:37:20.000Z
|
2021-08-17T13:58:04.000Z
|
test/proj4/proj-regression-EPSG-3857-3.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 5
|
2019-03-19T10:43:46.000Z
|
2021-09-09T14:28:39.000Z
|
from Magics.macro import *
import os
def plot_area(epsg, llx, lly, urx, ury):
img = os.path.basename(__file__).split('.')[0]
title = "Projection {} : [{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(epsg, llx, lly, urx, ury)
#Setting output
png = output(
output_formats = ['png'],
output_name = img,
output_name_first_page_number = 'off')
#Setting the geographical area
area = mmap(
subpage_lower_left_latitude = lly,
subpage_lower_left_longitude = llx,
subpage_map_projection = epsg,
subpage_upper_right_latitude = ury,
subpage_upper_right_longitude = urx,
subpage_map_area_definition = "corners"
)
#Setting the coastlines
background = mcoast(
map_coastline_land_shade = 'on',
map_coastline_resolution = "medium",
map_coastline_land_shade_colour = 'cream')
#Picking the grib metadata
title = mtext(
text_lines = [title],
text_justification = 'left',
text_font_size = 0.6,
text_colour = 'charcoal')
#Plotting
plot(png,area,background,title,)
plot_area("EPSG:3857", 45.14587546411987, -80.50121559160412, 110.0889373806949, -12.559177107495842 )
| 28.906977
| 103
| 0.630732
|
9e92ce4b916b56b0fb1c6abc1e465c285c345d56
| 667
|
py
|
Python
|
event_manager/log.py
|
cj-lin/event-manager
|
568368bd3500453b0dbd68017f63560b31ddd6d1
|
[
"MIT"
] | null | null | null |
event_manager/log.py
|
cj-lin/event-manager
|
568368bd3500453b0dbd68017f63560b31ddd6d1
|
[
"MIT"
] | null | null | null |
event_manager/log.py
|
cj-lin/event-manager
|
568368bd3500453b0dbd68017f63560b31ddd6d1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Filename: log
Author: CJ Lin
"""
import logging
import logging.handlers
import pathlib
def get_logger(prefix: pathlib.Path, is_debug: bool) -> logging.Logger:
if prefix:
prefix.parent.mkdir(parents=True, exist_ok=True)
handler = logging.handlers.TimedRotatingFileHandler(prefix, when="midnight")
handler.suffix = "%Y%m%d"
else:
handler = logging.StreamHandler()
log = logging.getLogger()
log.setLevel(logging.DEBUG if is_debug else logging.INFO)
handler.setFormatter(logging.Formatter("%(asctime)s\n%(message)s\n"))
log.addHandler(handler)
return log
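# Usage sketch (illustrative, hypothetical path):
#     log = get_logger(pathlib.Path('logs/event'), is_debug=True)
#     log.info('event manager started')
# writes timestamped records to logs/event, rotating at midnight; a falsy
# prefix logs to the console (stderr) instead.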
| 23.821429
| 84
| 0.685157
|
df96ec3ef7fb0efae6f3cc025b0221a2740b09ef
| 362
|
py
|
Python
|
tests/logging_tests/logconfig.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/logging_tests/logconfig.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/logging_tests/logconfig.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
import logging
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
class MyHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.config = settings.LOGGING
class MyEmailBackend(BaseEmailBackend):
def send_messages(self, email_messages):
pass
| 22.625
| 60
| 0.718232
|
5766458373dbcd1ec987f984b8e29c6312728b76
| 2,692
|
py
|
Python
|
tests/test_meteo.py
|
SalishSeaCast/SOG-Bloomcast-Ensemble
|
4c5ff082bc82fa1bc4a371c6b8859ccdd411f3a7
|
[
"Apache-2.0"
] | 1
|
2020-02-10T21:43:43.000Z
|
2020-02-10T21:43:43.000Z
|
tests/test_meteo.py
|
SalishSeaCast/SOG-Bloomcast-Ensemble
|
4c5ff082bc82fa1bc4a371c6b8859ccdd411f3a7
|
[
"Apache-2.0"
] | 5
|
2020-02-08T21:37:54.000Z
|
2020-02-08T21:37:56.000Z
|
tests/test_meteo.py
|
SalishSeaCast/SOG-Bloomcast-Ensemble
|
4c5ff082bc82fa1bc4a371c6b8859ccdd411f3a7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011-2021 Doug Latornell and The University of British Columbia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for SoG-bloomcast meteo module.
"""
import datetime
from unittest.mock import Mock
import pytest
@pytest.fixture
def meteo():
from bloomcast.meteo import MeteoProcessor
return MeteoProcessor(Mock(name='config'))
class TestMeteoProcessor():
"""Unit tests for MeteoProcessor object.
"""
def test_read_cloud_fraction_single_avg(self, meteo):
"""read_cloud_fraction returns expected value for single avg CF list
"""
meteo.config.climate.meteo.cloud_fraction_mapping = {
'Drizzle': [9.9675925925925934],
}
record = Mock(name='record')
record.find().text = 'Drizzle'
        cloud_fraction = meteo.read_cloud_fraction(record)
        assert cloud_fraction == 9.9675925925925934
def test_read_cloud_fraction_monthly_avg(self, meteo):
"""read_cloud_fraction returns expected value for monthly avg CF list
"""
meteo.config.climate.meteo.cloud_fraction_mapping = {
'Fog': [
9.6210045662100452, 9.3069767441860467, 9.5945945945945947,
9.5, 9.931034482758621, 10.0, 9.7777777777777786,
9.6999999999999993, 7.8518518518518521, 8.9701492537313428,
9.2686980609418281, 9.0742358078602621]
}
record = Mock(name='record')
record.find().text = 'Fog'
def mock_timestamp_data(part):
parts = {'year': 2012, 'month': 4, 'day': 1, 'hour': 12}
return parts[part]
record.get = mock_timestamp_data
        cloud_fraction = meteo.read_cloud_fraction(record)
        assert cloud_fraction == 9.5
def test_format_data(self, meteo):
"""format_data generator returns formatted forcing data file line
"""
meteo.config.climate.meteo.station_id = '889'
meteo.data['air_temperature'] = [
(datetime.datetime(2011, 9, 25, i, 0, 0), 215.0)
for i in range(24)]
line = next(meteo.format_data('air_temperature'))
assert line == '889 2011 09 25 42' + ' 215.00' * 24 + '\n'
| 37.388889
| 77
| 0.664933
|
810e51566d90f3df8abd2bb3c21ccb5c183080e2
| 32
|
py
|
Python
|
test_files/uctable_Zl.py
|
garrettluu/js-adler32
|
b40011c6f76bb2a0b78126fc8412ff380c905dcd
|
[
"Apache-2.0"
] | 224
|
2015-01-07T06:06:33.000Z
|
2022-03-29T08:52:49.000Z
|
test_files/uctable_Zl.py
|
stof/js-adler32
|
b7700fd5fa4c1ecfb4e67d20ed0248154940fcdb
|
[
"Apache-2.0"
] | 18
|
2015-05-06T22:30:17.000Z
|
2022-03-30T18:15:51.000Z
|
test_files/uctable_Zl.py
|
stof/js-adler32
|
b7700fd5fa4c1ecfb4e67d20ed0248154940fcdb
|
[
"Apache-2.0"
] | 38
|
2015-01-28T03:34:17.000Z
|
2022-02-10T03:28:14.000Z
|
uctable = [ [ 226, 128, 168 ] ]
| 16
| 31
| 0.5
|
1b67811354ce0dbeadcc53f2bf0286cbcf400e12
| 74,574
|
py
|
Python
|
src/Lib/_pyio.py
|
martinphellwig/brython_wf
|
e169afc1e048cba0c12118b4cd6f109df6fe67c9
|
[
"BSD-3-Clause"
] | 3
|
2017-04-04T06:18:16.000Z
|
2020-01-17T02:03:39.000Z
|
src/Lib/_pyio.py
|
martinphellwig/brython_wf
|
e169afc1e048cba0c12118b4cd6f109df6fe67c9
|
[
"BSD-3-Clause"
] | 1
|
2019-07-08T05:28:59.000Z
|
2021-01-14T10:44:49.000Z
|
src/Lib/_pyio.py
|
martinphellwig/brython_wf
|
e169afc1e048cba0c12118b4cd6f109df6fe67c9
|
[
"BSD-3-Clause"
] | 8
|
2017-06-27T05:38:52.000Z
|
2021-06-19T16:00:03.000Z
|
"""
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
# Import _thread instead of threading to reduce startup cost
try:
from _thread import allocate_lock as Lock
except ImportError:
from _dummy_thread import allocate_lock as Lock
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
valid_seek_flags = {0, 1, 2} # Hardwired values
if hasattr(os, 'SEEK_HOLE') :
valid_seek_flags.add(os.SEEK_HOLE)
valid_seek_flags.add(os.SEEK_DATA)
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
# Rebind for compatibility
BlockingIOError = BlockingIOError
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
r"""Open file and return a stream. Raise IOError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file is
opened. It defaults to 'r' which means open for reading in text mode. Other
common values are 'w' for writing (truncating the file if it already
exists), 'x' for exclusive creation of a new file, and 'a' for appending
(which on some Unix systems, means that all writes append to the end of the
file regardless of the current seek position). In text mode, if encoding is
not specified the encoding used is platform dependent. (For reading and
writing raw bytes use binary mode and leave encoding unspecified.) The
available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
    raises a `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
    closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by calling
*opener* with (*file*, *flags*). *opener* must return an open file
descriptor (passing os.open as *opener* results in functionality similar to
passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("axrwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
creating = "x" in modes
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if creating or writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if creating + reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (creating or reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(creating and "x" or "") +
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd, opener=opener)
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return raw
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif creating or writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
if binary:
return buffer
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
text.mode = mode
return text
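# Usage sketch (illustrative): the object returned by open() depends on mode
# and buffering, e.g.
#     open('f.txt', 'r')      -> TextIOWrapper (buffered text)
#     open('f.bin', 'rb')     -> BufferedReader
#     open('f.bin', 'wb')     -> BufferedWriter
#     open('f.bin', 'r+b')    -> BufferedRandom
#     open('f.bin', 'rb', 0)  -> FileIO (unbuffered raw)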
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'r') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an IOError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Some operating systems / file systems could provide additional values.
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise UnsupportedOperation.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise UnsupportedOperation.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise UnsupportedOperation.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise an ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit=-1):
r"""Read and return a line of bytes from the stream.
If limit is specified, at most limit bytes will be read.
Limit should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, n=-1):
"""Read and return up to n bytes, where n is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n=None):
"""Read and return up to n bytes, where n is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, n=None):
"""Read up to n bytes with at most one read() system call,
where n is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array('b', data)
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise IOError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise IOError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError("can not serialize a '{0}' object"
.format(self.__class__.__name__))
def __repr__(self):
clsname = self.__class__.__name__
try:
name = self.name
except AttributeError:
return "<_pyio.{0}>".format(clsname)
else:
return "<_pyio.{0} name={1!r}>".format(clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
return memoryview(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""This is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("unsupported whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def writable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
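# Usage sketch (illustrative): BytesIO behaves like a binary file backed by an
# in-memory bytearray, e.g.
#     b = BytesIO(b'abc'); b.seek(0, 2); b.write(b'de')
#     b.getvalue()  # -> b'abcde'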
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
A buffer for a readable, sequential BaseRawIO object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise IOError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
if n is not None and n < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
if hasattr(self.raw, 'readall'):
chunk = self.raw.readall()
if chunk is None:
return buf[pos:] or None
else:
return buf[pos:] + chunk
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
try:
chunk = self.raw.read()
except InterruptedError:
continue
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
try:
chunk = self.raw.read(wanted)
except InterruptedError:
continue
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
while True:
try:
current = self.raw.read(to_read)
except InterruptedError:
continue
break
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n < 0:
raise ValueError("number of bytes to read must be positive")
if n == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
if not raw.writable():
raise IOError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer. (This may
# raise BlockingIOError with characters_written == 0.)
self._flush_unlocked()
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except InterruptedError:
continue
except BlockingIOError:
raise RuntimeError("self.raw should implement RawIOBase: it "
"should not raise BlockingIOError")
if n is None:
raise BlockingIOError(
errno.EAGAIN,
"write could not complete without blocking", 0)
if n > len(self._write_buf) or n < 0:
raise IOError("write() returned incorrect number of bytes")
del self._write_buf[:n]
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
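# A minimal usage sketch of the writer above (illustrative only; the file
# path is hypothetical and any writable raw stream would do):
#
#   >>> raw = FileIO("/tmp/example.bin", "w")
#   >>> bw = BufferedWriter(raw, buffer_size=8)
#   >>> bw.write(b"abc")        # stays in the internal buffer
#   3
#   >>> bw.write(b"defghijk")   # pushes the buffer past 8 bytes -> flushed
#   8
#   >>> bw.flush()              # force out anything still buffered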
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
"""Constructor.
The arguments are two RawIO instances.
"""
if not reader.readable():
raise IOError('"reader" argument must be readable.')
if not writer.writable():
raise IOError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
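# A minimal usage sketch of the pair above (illustrative only; it assumes a
# local socketpair and uses socket.makefile(..., buffering=0) to obtain a
# raw, unbuffered endpoint):
#
#   >>> import socket
#   >>> a, b = socket.socketpair()
#   >>> raw_a = a.makefile('rwb', buffering=0)   # a raw (unbuffered) SocketIO
#   >>> pair = BufferedRWPair(raw_a, raw_a)
#   >>> b.sendall(b'ping\n')
#   >>> pair.read(5)
#   b'ping\n'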
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise IOError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
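# A minimal usage sketch of the random-access buffer above (illustrative
# only; the file path is hypothetical):
#
#   >>> raw = FileIO("/tmp/example.bin", "w+")
#   >>> f = BufferedRandom(raw)
#   >>> f.write(b"hello")      # buffered on the writer side
#   5
#   >>> f.seek(0)              # flushes pending writes, then seeks
#   0
#   >>> f.read(5)              # read back through the reader side
#   b'hello'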
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n=-1):
"""Read at most n characters from stream, where n is an int.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
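# A minimal usage sketch of the decoder above (illustrative only; it wraps a
# UTF-8 incremental decoder and shows the pending-"\r" behaviour described in
# the docstring):
#
#   >>> import codecs
#   >>> dec = IncrementalNewlineDecoder(
#   ...     codecs.getincrementaldecoder("utf-8")(), translate=True)
#   >>> dec.decode(b"one\r")                # trailing "\r" is held back
#   'one'
#   >>> dec.decode(b"\ntwo\n", final=True)  # "\r\n" reassembled, then translated
#   '\ntwo\n'
#   >>> dec.newlines                        # kinds of line endings seen so far
#   ('\n', '\r\n')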
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding(False).
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
enabled. With this enabled, on input, the line endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
# The write_through argument has no effect here since this
# implementation always writes through. The argument is present only
# so that the signature can match the signature of the C version.
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding(False)
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
self._b2cratio = 0.0
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<_pyio.TextIOWrapper"
try:
name = self.name
except AttributeError:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except AttributeError:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
try:
self.flush()
finally:
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
decoded_chars = self._decoder.decode(input_chunk, eof)
self._set_decoded_chars(decoded_chars)
if decoded_chars:
self._b2cratio = len(input_chunk) / len(self._decoded_chars)
else:
self._b2cratio = 0.0
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
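# A round-trip sketch of the cookie packing above (illustrative numbers; the
# two helpers are pure arithmetic, so any instance can demonstrate them):
#
#   >>> t = TextIOWrapper(BytesIO(b""), encoding="utf-8")
#   >>> cookie = t._pack_cookie(1000, dec_flags=1, bytes_to_feed=5,
#   ...                         need_eof=1, chars_to_skip=3)
#   >>> t._unpack_cookie(cookie)
#   (1000, 1, 5, 1, 3)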
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Fast search for an acceptable start point, close to our
# current pos.
# Rationale: calling decoder.decode() has a large overhead
# regardless of chunk size; we want the number of such calls to
# be O(1) in most situations (common decoders, non-crazy input).
# Actually, it will be exactly 1 for fixed-size codecs (all
# 8-bit codecs, also UTF-16 and UTF-32).
skip_bytes = int(self._b2cratio * chars_to_skip)
skip_back = 1
assert skip_bytes <= len(next_input)
while skip_bytes > 0:
decoder.setstate((b'', dec_flags))
# Decode up to the tentative start point
n = len(decoder.decode(next_input[:skip_bytes]))
if n <= chars_to_skip:
b, d = decoder.getstate()
if not b:
# Before pos and no bytes buffered in decoder => OK
dec_flags = d
chars_to_skip -= n
break
# Skip back by buffered amount and reset heuristic
skip_bytes -= len(b)
skip_back = 1
else:
# We're too far ahead, skip back a bit
skip_bytes -= skip_back
skip_back = skip_back * 2
else:
skip_bytes = 0
decoder.setstate((b'', dec_flags))
# Note our initial start point.
start_pos = position + skip_bytes
start_flags = dec_flags
if chars_to_skip == 0:
# We haven't moved from the start point.
return self._pack_cookie(start_pos, start_flags)
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
bytes_fed = 0
need_eof = 0
# Chars decoded since `start_pos`
chars_decoded = 0
for i in range(skip_bytes, len(next_input)):
bytes_fed += 1
chars_decoded += len(decoder.decode(next_input[i:i+1]))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("unsupported whence (%r)" % (whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
# Finally, reset the encoder (merely useful for proper BOM handling)
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if cookie != 0:
encoder.setstate(0)
else:
encoder.reset()
return cookie
def read(self, n=None):
self._checkReadable()
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
try:
n.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
# No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
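# A minimal usage sketch of the wrapper above (illustrative only; it layers a
# TextIOWrapper over BytesIO and exercises universal newlines plus the opaque
# tell()/seek() cookies):
#
#   >>> t = TextIOWrapper(BytesIO(b"one\r\ntwo\n"), encoding="utf-8")
#   >>> t.readline()
#   'one\n'
#   >>> t.newlines            # both kinds already seen in the decoded chunk
#   ('\n', '\r\n')
#   >>> pos = t.tell()        # opaque cookie, only meaningful to seek()
#   >>> t.readline()
#   'two\n'
#   >>> t.seek(pos) == pos
#   True
#   >>> t.readline()
#   'two\n'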
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
The initial_value argument sets the initial value of the object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="strict",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
initial_value = str(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
# that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
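# A minimal usage sketch of the in-memory text stream above (illustrative
# only):
#
#   >>> s = StringIO("hello\n")
#   >>> s.readline()
#   'hello\n'
#   >>> s.write("world\n")
#   6
#   >>> s.getvalue()
#   'hello\nworld\n'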
| 35.835656
| 82
| 0.573699
|
d5ec597b99c2fdb2ea74d0bc9dd923f775b7a675
| 102
|
py
|
Python
|
aploader/exceptions.py
|
WPMedia/politico-civic-ap-loader
|
a9642aaa88db9fd1afb070babafcfcb489466099
|
[
"MIT"
] | 1
|
2019-09-22T20:08:04.000Z
|
2019-09-22T20:08:04.000Z
|
aploader/exceptions.py
|
The-Politico/politico-civic-ap-loader
|
4afeebb62da4b8f22da63711e7176bf4527bccfb
|
[
"MIT"
] | 5
|
2019-01-28T17:00:46.000Z
|
2021-05-09T19:28:43.000Z
|
aploader/exceptions.py
|
WPMedia/politico-civic-ap-loader
|
a9642aaa88db9fd1afb070babafcfcb489466099
|
[
"MIT"
] | 1
|
2019-01-28T15:10:00.000Z
|
2019-01-28T15:10:00.000Z
|
class AploaderConfigError(Exception):
"""Raised when required config is not present."""
pass
| 20.4
| 53
| 0.715686
|
02a9ee204201f58ea58181026e1466f1a89cddaa
| 3,529
|
py
|
Python
|
textattack/loggers/visdom_logger.py
|
yuchenlin/TextAttack
|
69c64016546effa1b3b197dfbdb92477614c36ca
|
[
"MIT"
] | 1
|
2020-06-03T16:08:00.000Z
|
2020-06-03T16:08:00.000Z
|
textattack/loggers/visdom_logger.py
|
SatoshiRobatoFujimoto/TextAttack
|
a809a9bddddff9f41750949e26edde26c8af6cfa
|
[
"MIT"
] | null | null | null |
textattack/loggers/visdom_logger.py
|
SatoshiRobatoFujimoto/TextAttack
|
a809a9bddddff9f41750949e26edde26c8af6cfa
|
[
"MIT"
] | null | null | null |
import socket
from visdom import Visdom
from textattack.shared.utils import html_table_from_rows
from .logger import Logger
def port_is_open(port_num, hostname='127.0.0.1'):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((hostname, port_num))
sock.close()
if result == 0: return True
return False
class VisdomLogger(Logger):
def __init__(self, env='main', port=8097, hostname='localhost'):
if not port_is_open(port, hostname=hostname):
raise socket.error(f'Visdom not running on {hostname}:{port}')
self.vis = Visdom(port=port, server=hostname, env=env)
self.windows = {}
self.sample_rows = []
def log_attack_result(self, result):
text_a, text_b = result.diff_color(color_method='html')
result_str = result.goal_function_result_str(color_method='html')
self.sample_rows.append([result_str,text_a,text_b])
def log_summary_rows(self, rows, title, window_id):
self.table(rows, title=title, window_id=window_id)
def flush(self):
self.table(self.sample_rows, title='Sample-Level Results', window_id='sample_level_results')
def log_hist(self, arr, numbins, title, window_id):
self.bar(arr, numbins=numbins, title=title, window_id=window_id)
def text(self, text_data, title=None, window_id='default'):
if window_id and window_id in self.windows:
window = self.windows[window_id]
self.vis.text(text_data, win=window)
else:
new_window = self.vis.text(text_data,
opts=dict(
title=title
)
)
self.windows[window_id] = new_window
def table(self, rows, window_id=None, title=None, header=None, style=None):
""" Generates an HTML table. """
if not window_id: window_id = title # Can provide either of these,
if not title: title = window_id # or both.
table_html = html_table_from_rows(rows, title=title, header=header, style_dict=style)
self.text(table_html, title=title, window_id=window_id)
def bar(self, X_data, numbins=10, title=None, window_id=None):
window = None
if window_id and window_id in self.windows:
window = self.windows[window_id]
self.vis.bar(
X=X_data,
win=window,
opts=dict(
title=title,
numbins=numbins
)
)
else:
new_window = self.vis.bar(
X=X_data,
opts=dict(
title=title,
numbins=numbins
)
)
if window_id:
self.windows[window_id] = new_window
def hist(self, X_data, numbins=10, title=None, window_id=None):
window = None
if window_id and window_id in self.windows:
window = self.windows[window_id]
self.vis.histogram(
X=X_data,
win=window,
opts=dict(
title=title,
numbins=numbins
)
)
else:
new_window = self.vis.histogram(
X=X_data,
opts=dict(
title=title,
numbins=numbins
)
)
if window_id:
self.windows[window_id] = new_window
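# A minimal usage sketch of the logger above (illustrative only; it assumes a
# Visdom server is already listening on localhost:8097, and the example rows
# are made up):
#
#   logger = VisdomLogger(env='main', port=8097, hostname='localhost')
#   logger.log_summary_rows([['Attack success rate', '87%']],
#                           'Attack Results', 'attack_results')
#   logger.flush()   # pushes any accumulated sample-level rows as a table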
| 34.598039
| 100
| 0.561916
|
da61a3f0662510a4a43b12acdb6790f8ddda7498
| 93,483
|
py
|
Python
|
qa/rpc-tests/p2p-segwit.py
|
frkl/creativechain-core
|
b25ef5931488f3b514361d36851f5aee4a01248d
|
[
"MIT"
] | 34
|
2017-01-29T01:29:29.000Z
|
2021-08-16T00:13:29.000Z
|
qa/rpc-tests/p2p-segwit.py
|
frkl/creativechain-core
|
b25ef5931488f3b514361d36851f5aee4a01248d
|
[
"MIT"
] | 6
|
2017-03-02T17:57:31.000Z
|
2017-08-24T14:26:27.000Z
|
qa/rpc-tests/p2p-segwit.py
|
frkl/creativechain-core
|
b25ef5931488f3b514361d36851f5aee4a01248d
|
[
"MIT"
] | 14
|
2017-03-02T17:40:49.000Z
|
2019-03-01T02:18:48.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_ACTIVATION_THRESHOLD = 108
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
'''
SegWit p2p test.
'''
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize())
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
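# A quick sanity check of the arithmetic above (illustrative sizes): for a
# block whose base serialization is 900,000 bytes and whose witness-carrying
# serialization is 1,300,000 bytes,
#   vsize = int((3*900000 + 1300000 + 3) / 4) = 1000000
# i.e. 1,000,000 virtual bytes -- the MAX_BLOCK_BASE_SIZE boundary that the
# block-size tests below push against.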
# Note: we can reduce code by using SingleNodeConnCB (in master, not 0.12)
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.sleep_time = 0.05
self.getdataset = set()
self.last_reject = None
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_inv(self, conn, message):
self.last_inv = message
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
self.last_getdata = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_pong(self, conn, message):
self.last_pong = message
def on_reject(self, conn, message):
self.last_reject = message
#print (message)
# Syncing helpers
def sync(self, test_function, timeout=60):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function, timeout)
self.ping_counter += 1
return
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
self.sync(test_function, timeout)
return
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_getdata != None
self.sync(test_function, timeout)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
self.sync(test_function, timeout)
def wait_for_inv(self, expected_inv, timeout=60):
test_function = lambda: self.last_inv != expected_inv
self.sync(test_function, timeout)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_getdata = None
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
return
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_getdata = None
self.last_getheaders = None
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
return
def announce_block(self, block, use_header):
with mininode_lock:
self.last_getdata = None
if use_header:
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_block = None
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_block
def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
self.send_message(tx_message)
self.sync_with_ping()
assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
if (reason != None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(self.last_reject.reason, reason)
# Test whether a witness block had the correct effect on the tip
def test_witness_block(self, block, accepted, with_witness=True):
if with_witness:
self.send_message(msg_witness_block(block))
else:
self.send_message(msg_block(block))
self.sync_with_ping()
assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO(object):
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
class SegWitTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1"]))
# Start a node for testing IsStandard rules.
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1", "-acceptnonstdtxn=0"]))
connect_nodes(self.nodes[0], 1)
# Disable segwit's bip9 parameter to simulate upgrading after activation.
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-bip9params=segwit:0:0"]))
connect_nodes(self.nodes[0], 2)
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=VB_TOP_BITS):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
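# The individual tests below largely follow the same pattern built from the
# two helpers above (shown here for orientation only; some_tx is a
# placeholder for whatever transaction a given test constructs):
#
#   block = self.build_next_block()
#   self.update_witness_block_with_transactions(block, [some_tx])
#   self.test_node.test_witness_block(block, accepted=True)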
def test_witness_services(self):
print("\tVerifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
print("\tTesting non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
print("\tTesting behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, bitcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_reject.reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
# This test can only be run after segwit has activated
def test_witness_commitments(self):
print("\tTesting witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
self.test_node.test_witness_block(block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
self.test_node.test_witness_block(block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
print("\tTesting witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
self.test_node.test_witness_block(block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
self.test_node.test_witness_block(block, accepted=True)
def test_witness_block_size(self):
print("\tTesting witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
self.test_node.test_witness_block(block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
self.test_node.test_witness_block(block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This should not work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
print("\tTesting extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-200000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
self.test_node.test_witness_block(block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520 byte pushes in witness stack '''
print("\tTesting maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_program_length(self):
        # Witness outputs can be created with arbitrarily long witness scripts,
        # but a script larger than 10k bytes cannot be successfully spent.
print("\tTesting maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_input_length(self):
''' Ensure that vin length must match vtxinwit length '''
print("\tTesting witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
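                # BIP144 extended serialization: an empty vector (a single 0x00 byte)
                # serves as the marker, followed by the flags byte, before vin/vout.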
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_tx_relay_before_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_getdata.inv[0].type == 1)
        # Since we haven't delivered the tx yet, inv'ing the same tx from
        # a witness-enabled peer ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
print("Error: duplicate tx getdata!")
assert(False)
except AssertionError as e:
pass
        # Delivering this transaction with a witness should fail (no matter who
        # it's from).
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
        # The node should not add this tx to its reject filter: announcing it again
        # should still elicit a getdata and another tx-size rejection.
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-100000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv(CInv(1, tx2.sha256)) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv(CInv(1, tx3.sha256))
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
print("\tTesting block relay")
blocktype = 2|MSG_WITNESS_FLAG
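        # 2 is MSG_BLOCK; OR'ing in MSG_WITNESS_FLAG requests blocks with witness data.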
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block1, True)
# Creativecoin: Blocks with nVersion < VB_TOP_BITS are rejected
# block2 = self.build_next_block(nVersion=4)
# block2.solve()
# self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
# assert(self.test_node.last_getdata.inv[0].type == blocktype)
# self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
        if not segwit_activated:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
self.test_node.test_witness_block(block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
# Creativecoin: Blocks with nVersion < VB_TOP_BITS are rejected
block4 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
self.old_node.announce_block(block4, use_header=False)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
print("\tTesting standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-100000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000000, scriptPubKey)]
tx.vout.append(CTxOut(800000, scriptPubKey)) # Might burn this later
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(700000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, scriptPubKey)]
tx2.rehash()
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, witness_program)]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
print("\tTesting standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
        NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 400000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
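        # Cycle through witness versions 1 through 16, saving version 0 for last.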
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-100000, scriptPubKey)]
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-100000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 100000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_reject.reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_premature_coinbase_witness_spend(self):
print("\tTesting premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
def test_signature_version_1(self):
print("\tTesting segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
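        # Exercise each base hashtype (ALL/NONE/SINGLE), with and without ANYONECANPAY.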
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 100000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
print("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests. Just spend everything in
# temp_utxos to a corresponding entry in self.utxos
tx = CTransaction()
index = 0
for i in temp_utxos:
# Just spend to our usual anyone-can-spend output
# Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
print("\tTesting P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
        # segwit activation. Note that older bitcoind versions that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
# the test.
def test_upgrade_after_activation(self, node, node_id):
print("\tTesting software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
stop_node(node, node_id)
self.nodes[node_id] = start_node(node_id, self.options.tmpdir, ["-debug"])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(node, 'segwit')['status'] == "active")
# Make sure this peers blocks match those of node0.
height = node.getblockcount()
while height >= 0:
block_hash = node.getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), node.getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
print("\tTesting sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
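        # Witness sigop counting: OP_CHECKMULTISIG costs 20 sigops and OP_CHECKSIG
        # costs 1, so spending this program uses 20*5 + 193 sigops.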
sigops_per_script = 20*5 + 193*1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
self.test_node.test_witness_block(block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
self.test_node.test_witness_block(block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
self.test_node.test_witness_block(block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
self.test_node.test_witness_block(block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
self.test_node.test_witness_block(block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_getblocktemplate_before_lockin(self):
print("\tTesting getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# If we're not indicating segwit support, we will still be
# signalling for segwit activation.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# TODO: this duplicates some code from blocktools.py, would be nice
# to refactor.
# Check that default_witness_commitment is present.
block = CBlock()
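                # The witness merkle root is computed over wtxids (coinbase wtxid = 0);
                # the commitment is SHA256d(witness_root || nonce) with a zero nonce, per BIP141.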
witness_root = block.get_merkle_root([ser_uint256(0), ser_uint256(txid)])
check_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(0)))
from test_framework.blocktools import WITNESS_COMMITMENT_HEADER
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(check_commitment)
script = CScript([OP_RETURN, output_data])
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
print("\tTesting uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-100000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
self.test_node.test_witness_block(block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
self.test_node.test_witness_block(block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-100000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
self.test_node.test_transaction_acceptance(tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
print("\tTesting detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 100000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
def test_reject_blocks(self):
print ("\tTesting rejection of block.nVersion < BIP9_TOP_BITS blocks")
block = self.build_next_block(nVersion=4)
block.solve()
resp = self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(resp, 'bad-version(0x00000004)')
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
self.old_node = TestNode() # only NODE_NETWORK
self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
self.p2p_connections = [self.test_node, self.old_node]
self.connections = []
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
self.test_node.add_connection(self.connections[0])
self.old_node.add_connection(self.connections[1])
self.std_node.add_connection(self.connections[2])
NetworkThread().start() # Start up network handling in another thread
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
print("\nStarting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness tx's are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
print("\nTesting behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
print("\nTesting behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_reject_blocks()
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(self.nodes[2], 2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
| 45.645996 | 143 | 0.658098 |
0aab2534255940cc68d1d8468cf59b5dd0662508 | 2,134 | py | Python | examples/factoranalysis/latent_factor_from_tfa.py | osaaso3/brainiak | 153552c9b65e8354fa45985454f96978e0a92579 | ["Apache-2.0"] | 235 | 2017-10-31T22:58:14.000Z | 2022-03-23T06:16:57.000Z | examples/factoranalysis/latent_factor_from_tfa.py | osaaso3/brainiak | 153552c9b65e8354fa45985454f96978e0a92579 | ["Apache-2.0"] | 205 | 2017-10-24T16:55:16.000Z | 2022-03-31T22:56:14.000Z | examples/factoranalysis/latent_factor_from_tfa.py | osaaso3/brainiak | 153552c9b65e8354fa45985454f96978e0a92579 | ["Apache-2.0"] | 104 | 2017-11-01T20:04:07.000Z | 2022-03-25T18:55:34.000Z |
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
import scipy.io
import numpy as np
import nibabel as nib
from subprocess import call
from scipy.stats import stats
from nilearn.input_data import NiftiMasker
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
#download data
data_dir = os.path.join(os.getcwd(), 'data')
file_name = os.path.join(data_dir, 's0.mat')
url = 'https://www.dropbox.com/s/r5s9tg4ekxzbrco/s0.mat?dl=0'
cmd = 'curl --location --create-dirs -o ' + file_name + ' ' + url
try:
retcode = call(cmd, shell=True)
if retcode < 0:
print("File download was terminated by signal", -retcode, file=sys.stderr)
else:
print("File download returned", retcode, file=sys.stderr)
except OSError as e:
print("File download failed:", e, file=sys.stderr)
#get fMRI data and scanner RAS coordinates
all_data = scipy.io.loadmat(file_name)
data = all_data['data']
R = all_data['R']
# Z-score the data
data = stats.zscore(data, axis=1, ddof=1)
n_voxel, n_tr = data.shape
# Run TFA with downloaded data
from brainiak.factoranalysis.tfa import TFA
# uncomment below line to get help message on TFA
#help(TFA)
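# K sets the number of latent factors; max_num_voxel and max_num_tr cap how many
# voxels/TRs are sampled during fitting (here, roughly half of each).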
tfa = TFA(K=5,
max_num_voxel=int(n_voxel*0.5),
max_num_tr=int(n_tr*0.5),
verbose=True)
tfa.fit(data, R)
print("\n centers of latent factors are:")
print(tfa.get_centers(tfa.local_posterior_))
print("\n widths of latent factors are:")
widths = tfa.get_widths(tfa.local_posterior_)
print(widths)
print("\n stds of latent RBF factors are:")
rbf_std = np.sqrt(widths/(2.0))
print(rbf_std)
| 30.927536 | 82 | 0.730084 |
0948cee11197bad52aa7f361913a803fa216b709 | 14,555 | py | Python | vendor-local/lib/python/celery/app/base.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | ["BSD-3-Clause"] | 4 | 2015-05-08T16:58:53.000Z | 2019-09-06T05:30:59.000Z | vendor-local/lib/python/celery/app/base.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | ["BSD-3-Clause"] | 2 | 2019-02-17T17:44:53.000Z | 2019-03-28T03:54:39.000Z | vendor-local/lib/python/celery/app/base.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | ["BSD-3-Clause"] | 7 | 2015-05-21T15:38:29.000Z | 2019-10-28T23:39:06.000Z |
# -*- coding: utf-8 -*-
"""
celery.app.base
~~~~~~~~~~~~~~~
Application Base Class.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import with_statement
import os
import warnings
import platform as _platform
from contextlib import contextmanager
from copy import deepcopy
from functools import wraps
from kombu.clocks import LamportClock
from .. import datastructures
from .. import platforms
from ..exceptions import AlwaysEagerIgnored
from ..utils import cached_property, instantiate, lpmerge
from .defaults import DEFAULTS, find_deprecated_settings, find
import kombu
if kombu.VERSION < (2, 0):
    raise ImportError("Celery requires Kombu version 2.0 or higher.")
BUGREPORT_INFO = """
platform -> system:%(system)s arch:%(arch)s imp:%(py_i)s
software -> celery:%(celery_v)s kombu:%(kombu_v)s py:%(py_v)s
settings -> transport:%(transport)s results:%(results)s
"""
class Settings(datastructures.ConfigurationView):
@property
def CELERY_RESULT_BACKEND(self):
"""Resolves deprecated alias ``CELERY_BACKEND``."""
return self.get("CELERY_RESULT_BACKEND") or self.get("CELERY_BACKEND")
@property
def BROKER_TRANSPORT(self):
"""Resolves compat aliases :setting:`BROKER_BACKEND`
and :setting:`CARROT_BACKEND`."""
return (self.get("BROKER_TRANSPORT") or
self.get("BROKER_BACKEND") or
self.get("CARROT_BACKEND"))
@property
def BROKER_BACKEND(self):
"""Deprecated compat alias to :attr:`BROKER_TRANSPORT`."""
return self.BROKER_TRANSPORT
@property
def BROKER_HOST(self):
return (os.environ.get("CELERY_BROKER_URL") or
self.get("BROKER_URL") or
self.get("BROKER_HOST"))
def find_option(self, name, namespace="celery"):
return find(name, namespace)
def get_by_parts(self, *parts):
return self["_".join(filter(None, parts))]
def find_value_for_key(self, name, namespace="celery"):
ns, key, _ = self.find_option(name, namespace=namespace)
return self.get_by_parts(ns, key)
class BaseApp(object):
"""Base class for apps."""
SYSTEM = platforms.SYSTEM
IS_OSX = platforms.IS_OSX
IS_WINDOWS = platforms.IS_WINDOWS
amqp_cls = "celery.app.amqp:AMQP"
backend_cls = None
events_cls = "celery.events:Events"
loader_cls = "celery.loaders.app:AppLoader"
log_cls = "celery.log:Logging"
control_cls = "celery.task.control:Control"
_pool = None
def __init__(self, main=None, loader=None, backend=None,
amqp=None, events=None, log=None, control=None,
set_as_current=True, accept_magic_kwargs=False, **kwargs):
self.main = main
self.amqp_cls = amqp or self.amqp_cls
self.backend_cls = backend or self.backend_cls
self.events_cls = events or self.events_cls
self.loader_cls = loader or self.loader_cls
self.log_cls = log or self.log_cls
self.control_cls = control or self.control_cls
self.set_as_current = set_as_current
self.accept_magic_kwargs = accept_magic_kwargs
self.clock = LamportClock()
self.on_init()
def on_init(self):
"""Called at the end of the constructor."""
pass
def config_from_object(self, obj, silent=False):
"""Read configuration from object, where object is either
a object, or the name of a module to import.
>>> celery.config_from_object("myapp.celeryconfig")
>>> from myapp import celeryconfig
>>> celery.config_from_object(celeryconfig)
"""
del(self.conf)
return self.loader.config_from_object(obj, silent=silent)
def config_from_envvar(self, variable_name, silent=False):
"""Read configuration from environment variable.
The value of the environment variable must be the name
of a module to import.
>>> os.environ["CELERY_CONFIG_MODULE"] = "myapp.celeryconfig"
>>> celery.config_from_envvar("CELERY_CONFIG_MODULE")
"""
del(self.conf)
return self.loader.config_from_envvar(variable_name, silent=silent)
def config_from_cmdline(self, argv, namespace="celery"):
"""Read configuration from argv.
The configuration is parsed by the loader's ``cmdline_config_parser`` and merged into the current configuration.
"""
self.conf.update(self.loader.cmdline_config_parser(argv, namespace))
def send_task(self, name, args=None, kwargs=None, countdown=None,
eta=None, task_id=None, publisher=None, connection=None,
connect_timeout=None, result_cls=None, expires=None,
queues=None, **options):
"""Send task by name.
:param name: Name of task to execute (e.g. `"tasks.add"`).
:keyword result_cls: Specify custom result class. Default is
using :meth:`AsyncResult`.
Supports the same arguments as
:meth:`~celery.app.task.BaseTask.apply_async`.
"""
if self.conf.CELERY_ALWAYS_EAGER:
warnings.warn(AlwaysEagerIgnored(
"CELERY_ALWAYS_EAGER has no effect on send_task"))
router = self.amqp.Router(queues)
result_cls = result_cls or self.AsyncResult
options.setdefault("compression",
self.conf.CELERY_MESSAGE_COMPRESSION)
options = router.route(options, name, args, kwargs)
exchange = options.get("exchange")
exchange_type = options.get("exchange_type")
with self.default_connection(connection, connect_timeout) as conn:
publish = publisher or self.amqp.TaskPublisher(conn,
exchange=exchange,
exchange_type=exchange_type)
try:
new_id = publish.delay_task(name, args, kwargs,
task_id=task_id,
countdown=countdown, eta=eta,
expires=expires, **options)
finally:
publisher or publish.close()  # close the publisher only if we created it here
return result_cls(new_id)
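# Illustrative usage (a sketch, not part of the original source). "tasks.add"
# is a hypothetical task name used purely for illustration:
#
#     >>> result = app.send_task("tasks.add", args=(2, 2), countdown=10)
#     >>> result.get(timeout=30)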
def AsyncResult(self, task_id, backend=None, task_name=None):
"""Create :class:`celery.result.BaseAsyncResult` instance."""
from ..result import BaseAsyncResult
return BaseAsyncResult(task_id, app=self, task_name=task_name,
backend=backend or self.backend)
def TaskSetResult(self, taskset_id, results, **kwargs):
"""Create :class:`celery.result.TaskSetResult` instance."""
from ..result import TaskSetResult
return TaskSetResult(taskset_id, results, app=self)
def broker_connection(self, hostname=None, userid=None,
password=None, virtual_host=None, port=None, ssl=None,
insist=None, connect_timeout=None, transport=None,
transport_options=None, **kwargs):
"""Establish a connection to the message broker.
:keyword hostname: defaults to the :setting:`BROKER_HOST` setting.
:keyword userid: defaults to the :setting:`BROKER_USER` setting.
:keyword password: defaults to the :setting:`BROKER_PASSWORD` setting.
:keyword virtual_host: defaults to the :setting:`BROKER_VHOST` setting.
:keyword port: defaults to the :setting:`BROKER_PORT` setting.
:keyword ssl: defaults to the :setting:`BROKER_USE_SSL` setting.
:keyword insist: defaults to the :setting:`BROKER_INSIST` setting.
:keyword connect_timeout: defaults to the
:setting:`BROKER_CONNECTION_TIMEOUT` setting.
:keyword backend_cls: defaults to the :setting:`BROKER_TRANSPORT`
setting.
:returns :class:`kombu.connection.BrokerConnection`:
"""
conf = self.conf
return self.amqp.BrokerConnection(
hostname or conf.BROKER_HOST,
userid or conf.BROKER_USER,
password or conf.BROKER_PASSWORD,
virtual_host or conf.BROKER_VHOST,
port or conf.BROKER_PORT,
transport=transport or conf.BROKER_TRANSPORT,
insist=self.either("BROKER_INSIST", insist),
ssl=self.either("BROKER_USE_SSL", ssl),
connect_timeout=self.either(
"BROKER_CONNECTION_TIMEOUT", connect_timeout),
transport_options=dict(conf.BROKER_TRANSPORT_OPTIONS,
**transport_options or {}))
@contextmanager
def default_connection(self, connection=None, connect_timeout=None):
"""For use within a with-statement to get a connection from the pool
if one is not already provided.
:keyword connection: If not provided, then a connection will be
acquired from the connection pool.
:keyword connect_timeout: *No longer used.*
"""
if connection:
yield connection
else:
with self.pool.acquire(block=True) as connection:
yield connection
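# Illustrative usage (a sketch, not part of the original source): acquire a
# pooled connection only when the caller did not supply one:
#
#     >>> with app.default_connection() as conn:
#     ...     pass  # use `conn` here; it is released back to the pool on exit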
def with_default_connection(self, fun):
"""With any function accepting `connection` and `connect_timeout`
keyword arguments, establishes a default connection if one is
not already passed to it.
Any automatically established connection will be closed after
the function returns.
**Deprecated**
Use ``with app.default_connection(connection)`` instead.
"""
@wraps(fun)
def _inner(*args, **kwargs):
connection = kwargs.pop("connection", None)
with self.default_connection(connection) as c:
return fun(*args, **dict(kwargs, connection=c))
return _inner
def prepare_config(self, c):
"""Prepare configuration before it is merged with the defaults."""
find_deprecated_settings(c)
return c
def now(self):
return self.loader.now(utc=self.conf.CELERY_ENABLE_UTC)
def mail_admins(self, subject, body, fail_silently=False):
"""Send an email to the admins in the :setting:`ADMINS` setting."""
if self.conf.ADMINS:
to = [admin_email for _, admin_email in self.conf.ADMINS]
return self.loader.mail_admins(subject, body, fail_silently, to=to,
sender=self.conf.SERVER_EMAIL,
host=self.conf.EMAIL_HOST,
port=self.conf.EMAIL_PORT,
user=self.conf.EMAIL_HOST_USER,
password=self.conf.EMAIL_HOST_PASSWORD,
timeout=self.conf.EMAIL_TIMEOUT,
use_ssl=self.conf.EMAIL_USE_SSL,
use_tls=self.conf.EMAIL_USE_TLS)
def select_queues(self, queues=None):
if queues:
return self.amqp.queues.select_subset(queues,
self.conf.CELERY_CREATE_MISSING_QUEUES)
def either(self, default_key, *values):
"""Fallback to the value of a configuration key if none of the
`*values` are true."""
for value in values:
if value is not None:
return value
return self.conf.get(default_key)
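# Illustrative usage (a sketch, not part of the original source): an explicit
# value wins, otherwise the configured value is used:
#
#     >>> app.either("BROKER_CONNECTION_TIMEOUT", None)  # -> value from config
#     >>> app.either("BROKER_CONNECTION_TIMEOUT", 2)     # -> 2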
def merge(self, l, r):
"""Like `dict(a, **b)` except it will keep values from `a`
if the value in `b` is :const:`None`."""
return lpmerge(l, r)
def _get_backend(self):
from ..backends import get_backend_cls
return get_backend_cls(
self.backend_cls or self.conf.CELERY_RESULT_BACKEND,
loader=self.loader)(app=self)
def _get_config(self):
return Settings({}, [self.prepare_config(self.loader.conf),
deepcopy(DEFAULTS)])
def _after_fork(self, obj_):
if self._pool:
self._pool.force_close_all()
self._pool = None
def bugreport(self):
import celery
import kombu
return BUGREPORT_INFO % {"system": _platform.system(),
"arch": _platform.architecture(),
"py_i": platforms.pyimplementation(),
"celery_v": celery.__version__,
"kombu_v": kombu.__version__,
"py_v": _platform.python_version(),
"transport": self.conf.BROKER_TRANSPORT,
"results": self.conf.CELERY_RESULT_BACKEND}
@property
def pool(self):
if self._pool is None:
try:
from multiprocessing.util import register_after_fork
register_after_fork(self, self._after_fork)
except ImportError:
pass
self._pool = self.broker_connection().Pool(
limit=self.conf.BROKER_POOL_LIMIT)
return self._pool
@cached_property
def amqp(self):
"""Sending/receiving messages. See :class:`~celery.app.amqp.AMQP`."""
return instantiate(self.amqp_cls, app=self)
@cached_property
def backend(self):
"""Storing/retrieving task state. See
:class:`~celery.backend.base.BaseBackend`."""
return self._get_backend()
@cached_property
def conf(self):
"""Current configuration (dict and attribute access)."""
return self._get_config()
@cached_property
def control(self):
"""Controlling worker nodes. See
:class:`~celery.task.control.Control`."""
return instantiate(self.control_cls, app=self)
@cached_property
def events(self):
"""Sending/receiving events. See :class:`~celery.events.Events`. """
return instantiate(self.events_cls, app=self)
@cached_property
def loader(self):
"""Current loader."""
from ..loaders import get_loader_cls
return get_loader_cls(self.loader_cls)(app=self)
@cached_property
def log(self):
"""Logging utilities. See :class:`~celery.log.Logging`."""
return instantiate(self.log_cls, app=self)
@cached_property
def tasks(self):
from ..registry import tasks
return tasks
| 37.035623
| 79
| 0.607351
|
543aba2a0e70ee1770cd7d9db950cdcf50ee5e4c
| 37,603
|
py
|
Python
|
homeassistant/core.py
|
joshluster/Home-Assistant
|
36b9c0a9462d28d8fd9aa81d5cf5fed45a38005a
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/core.py
|
joshluster/Home-Assistant
|
36b9c0a9462d28d8fd9aa81d5cf5fed45a38005a
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/core.py
|
joshluster/Home-Assistant
|
36b9c0a9462d28d8fd9aa81d5cf5fed45a38005a
|
[
"Apache-2.0"
] | 1
|
2021-03-13T18:15:31.000Z
|
2021-03-13T18:15:31.000Z
|
"""
Core components of Home Assistant.
Home Assistant is a Home Automation framework for observing the state
of entities and reacting to changes.
"""
# pylint: disable=unused-import, too-many-lines
import asyncio
from concurrent.futures import ThreadPoolExecutor
import enum
import logging
import os
import pathlib
import re
import sys
import threading
from time import monotonic
from types import MappingProxyType
from typing import Optional, Any, Callable, List # NOQA
from async_timeout import timeout
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.const import (
ATTR_DOMAIN, ATTR_FRIENDLY_NAME, ATTR_NOW, ATTR_SERVICE,
ATTR_SERVICE_CALL_ID, ATTR_SERVICE_DATA, EVENT_CALL_SERVICE,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
EVENT_SERVICE_EXECUTED, EVENT_SERVICE_REGISTERED, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, MATCH_ALL, EVENT_HOMEASSISTANT_CLOSE,
EVENT_SERVICE_REMOVED, __version__)
from homeassistant import loader
from homeassistant.exceptions import (
HomeAssistantError, InvalidEntityFormatError, InvalidStateError)
from homeassistant.util.async import (
run_coroutine_threadsafe, run_callback_threadsafe,
fire_coroutine_threadsafe)
import homeassistant.util as util
import homeassistant.util.dt as dt_util
import homeassistant.util.location as location
from homeassistant.util.unit_system import UnitSystem, METRIC_SYSTEM # NOQA
DOMAIN = 'homeassistant'
# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10 # seconds
# Pattern for validating entity IDs (format: <domain>.<entity>)
ENTITY_ID_PATTERN = re.compile(r"^(\w+)\.(\w+)$")
# How long to wait till things that run on startup have to finish.
TIMEOUT_EVENT_START = 15
_LOGGER = logging.getLogger(__name__)
def split_entity_id(entity_id: str) -> List[str]:
"""Split a state entity_id into domain, object_id."""
return entity_id.split(".", 1)
def valid_entity_id(entity_id: str) -> bool:
"""Test if an entity ID is a valid format."""
return ENTITY_ID_PATTERN.match(entity_id) is not None
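# Illustrative examples (not part of the original source), following
# ENTITY_ID_PATTERN above:
#
#     >>> valid_entity_id("light.kitchen")
#     True
#     >>> valid_entity_id("kitchen")  # missing the <domain>. part
#     False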
def valid_state(state: str) -> bool:
"""Test if a state is valid."""
return len(state) < 256
def callback(func: Callable[..., None]) -> Callable[..., None]:
"""Annotation to mark method as safe to call from within the event loop."""
# pylint: disable=protected-access
func._hass_callback = True
return func
def is_callback(func: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop."""
return '_hass_callback' in func.__dict__
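# Illustrative sketch (not part of the original source): marking a listener as
# safe to run directly inside the event loop and checking the marker:
#
#     >>> @callback
#     ... def forward_event(event):
#     ...     print(event)
#     >>> is_callback(forward_event)
#     True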
@callback
def async_loop_exception_handler(loop, context):
"""Handle all exception inside the core loop."""
kwargs = {}
exception = context.get('exception')
if exception:
kwargs['exc_info'] = (type(exception), exception,
exception.__traceback__)
_LOGGER.error("Error doing job: %s", context['message'], **kwargs)
class CoreState(enum.Enum):
"""Represent the current state of Home Assistant."""
not_running = 'NOT_RUNNING'
starting = 'STARTING'
running = 'RUNNING'
stopping = 'STOPPING'
def __str__(self) -> str:
"""Return the event."""
return self.value
class HomeAssistant(object):
"""Root object of the Home Assistant home automation."""
def __init__(self, loop=None):
"""Initialize new Home Assistant object."""
if sys.platform == 'win32':
self.loop = loop or asyncio.ProactorEventLoop()
else:
self.loop = loop or asyncio.get_event_loop()
executor_opts = {'max_workers': 10}
if sys.version_info[:2] >= (3, 5):
# max_workers defaults to the number of processors on the machine
# multiplied by 5, which suits the largely I/O-bound worker threads.
executor_opts['max_workers'] = None
if sys.version_info[:2] >= (3, 6):
executor_opts['thread_name_prefix'] = 'SyncWorker'
self.executor = ThreadPoolExecutor(**executor_opts)
self.loop.set_default_executor(self.executor)
self.loop.set_exception_handler(async_loop_exception_handler)
self._pending_tasks = []
self._track_task = True
self.bus = EventBus(self)
self.services = ServiceRegistry(self)
self.states = StateMachine(self.bus, self.loop)
self.config = Config() # type: Config
self.components = loader.Components(self)
self.helpers = loader.Helpers(self)
# This is a dictionary that any component can store any data on.
self.data = {}
self.state = CoreState.not_running
self.exit_code = None
@property
def is_running(self) -> bool:
"""Return if Home Assistant is running."""
return self.state in (CoreState.starting, CoreState.running)
def start(self) -> None:
"""Start home assistant."""
# Register the async start
fire_coroutine_threadsafe(self.async_start(), self.loop)
# Run forever and catch keyboard interrupt
try:
# Block until stopped
_LOGGER.info("Starting Home Assistant core loop")
self.loop.run_forever()
return self.exit_code
except KeyboardInterrupt:
self.loop.call_soon_threadsafe(
self.loop.create_task, self.async_stop())
self.loop.run_forever()
finally:
self.loop.close()
async def async_start(self):
"""Finalize startup from inside the event loop.
This method is a coroutine.
"""
_LOGGER.info("Starting Home Assistant")
self.state = CoreState.starting
# pylint: disable=protected-access
self.loop._thread_ident = threading.get_ident()
self.bus.async_fire(EVENT_HOMEASSISTANT_START)
try:
# Only block for EVENT_HOMEASSISTANT_START listener
self.async_stop_track_tasks()
with timeout(TIMEOUT_EVENT_START, loop=self.loop):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
'Something is blocking Home Assistant from wrapping up the '
'start up phase. We\'re going to continue anyway. Please '
'report the following info at http://bit.ly/2ogP58T : %s',
', '.join(self.config.components))
# Allow automations to set up the start triggers before changing state
await asyncio.sleep(0, loop=self.loop)
self.state = CoreState.running
_async_create_timer(self)
def add_job(self, target: Callable[..., None], *args: Any) -> None:
"""Add job to the executor pool.
target: target to call.
args: parameters for method to call.
"""
if target is None:
raise ValueError("Don't call add_job with None")
self.loop.call_soon_threadsafe(self.async_add_job, target, *args)
@callback
def async_add_job(self, target: Callable[..., None], *args: Any) -> None:
"""Add a job from within the eventloop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
task = None
if asyncio.iscoroutine(target):
task = self.loop.create_task(target)
elif is_callback(target):
self.loop.call_soon(target, *args)
elif asyncio.iscoroutinefunction(target):
task = self.loop.create_task(target(*args))
else:
task = self.loop.run_in_executor(None, target, *args)
# If a task is scheduled
if self._track_task and task is not None:
self._pending_tasks.append(task)
return task
@callback
def async_track_tasks(self):
"""Track tasks so you can wait for all tasks to be done."""
self._track_task = True
@callback
def async_stop_track_tasks(self):
"""Stop track tasks so you can't wait for all tasks to be done."""
self._track_task = False
@callback
def async_run_job(self, target: Callable[..., None], *args: Any) -> None:
"""Run a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if not asyncio.iscoroutine(target) and is_callback(target):
target(*args)
else:
self.async_add_job(target, *args)
def block_till_done(self) -> None:
"""Block till all pending work is done."""
run_coroutine_threadsafe(
self.async_block_till_done(), loop=self.loop).result()
async def async_block_till_done(self):
"""Block till all pending work is done."""
# To flush out any call_soon_threadsafe
await asyncio.sleep(0, loop=self.loop)
while self._pending_tasks:
pending = [task for task in self._pending_tasks
if not task.done()]
self._pending_tasks.clear()
if pending:
await asyncio.wait(pending, loop=self.loop)
else:
await asyncio.sleep(0, loop=self.loop)
def stop(self) -> None:
"""Stop Home Assistant and shuts down all threads."""
fire_coroutine_threadsafe(self.async_stop(), self.loop)
async def async_stop(self, exit_code=0) -> None:
"""Stop Home Assistant and shuts down all threads.
This method is a coroutine.
"""
# stage 1
self.state = CoreState.stopping
self.async_track_tasks()
self.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await self.async_block_till_done()
# stage 2
self.state = CoreState.not_running
self.bus.async_fire(EVENT_HOMEASSISTANT_CLOSE)
await self.async_block_till_done()
self.executor.shutdown()
self.exit_code = exit_code
self.loop.stop()
class EventOrigin(enum.Enum):
"""Represent the origin of an event."""
local = 'LOCAL'
remote = 'REMOTE'
def __str__(self):
"""Return the event."""
return self.value
class Event(object):
"""Representation of an event within the bus."""
__slots__ = ['event_type', 'data', 'origin', 'time_fired']
def __init__(self, event_type, data=None, origin=EventOrigin.local,
time_fired=None):
"""Initialize a new event."""
self.event_type = event_type
self.data = data or {}
self.origin = origin
self.time_fired = time_fired or dt_util.utcnow()
def as_dict(self):
"""Create a dict representation of this Event.
Async friendly.
"""
return {
'event_type': self.event_type,
'data': dict(self.data),
'origin': str(self.origin),
'time_fired': self.time_fired,
}
def __repr__(self):
"""Return the representation."""
# pylint: disable=maybe-no-member
if self.data:
return "<Event {}[{}]: {}>".format(
self.event_type, str(self.origin)[0],
util.repr_helper(self.data))
return "<Event {}[{}]>".format(self.event_type,
str(self.origin)[0])
def __eq__(self, other):
"""Return the comparison."""
return (self.__class__ == other.__class__ and
self.event_type == other.event_type and
self.data == other.data and
self.origin == other.origin and
self.time_fired == other.time_fired)
class EventBus(object):
"""Allow the firing of and listening for events."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize a new event bus."""
self._listeners = {}
self._hass = hass
@callback
def async_listeners(self):
"""Return dictionary with events and the number of listeners.
This method must be run in the event loop.
"""
return {key: len(self._listeners[key])
for key in self._listeners}
@property
def listeners(self):
"""Return dictionary with events and the number of listeners."""
return run_callback_threadsafe(
self._hass.loop, self.async_listeners
).result()
def fire(self, event_type: str, event_data=None, origin=EventOrigin.local):
"""Fire an event."""
self._hass.loop.call_soon_threadsafe(
self.async_fire, event_type, event_data, origin)
@callback
def async_fire(self, event_type: str, event_data=None,
origin=EventOrigin.local):
"""Fire an event.
This method must be run in the event loop.
"""
listeners = self._listeners.get(event_type, [])
# EVENT_HOMEASSISTANT_CLOSE should go only to its own listeners
match_all_listeners = self._listeners.get(MATCH_ALL)
if (match_all_listeners is not None and
event_type != EVENT_HOMEASSISTANT_CLOSE):
listeners = match_all_listeners + listeners
event = Event(event_type, event_data, origin)
if event_type != EVENT_TIME_CHANGED:
_LOGGER.info("Bus:Handling %s", event)
if not listeners:
return
for func in listeners:
self._hass.async_add_job(func, event)
def listen(self, event_type, listener):
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
"""
async_remove_listener = run_callback_threadsafe(
self._hass.loop, self.async_listen, event_type, listener).result()
def remove_listener():
"""Remove the listener."""
run_callback_threadsafe(
self._hass.loop, async_remove_listener).result()
return remove_listener
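# Illustrative usage (a sketch, not part of the original source), assuming
# `hass` is a HomeAssistant instance:
#
#     >>> def state_listener(event):
#     ...     print(event.data.get("entity_id"))
#     >>> remove = hass.bus.listen(EVENT_STATE_CHANGED, state_listener)
#     >>> remove()  # unsubscribe again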
@callback
def async_listen(self, event_type, listener):
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
This method must be run in the event loop.
"""
if event_type in self._listeners:
self._listeners[event_type].append(listener)
else:
self._listeners[event_type] = [listener]
def remove_listener():
"""Remove the listener."""
self._async_remove_listener(event_type, listener)
return remove_listener
def listen_once(self, event_type, listener):
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns function to unsubscribe the listener.
"""
async_remove_listener = run_callback_threadsafe(
self._hass.loop, self.async_listen_once, event_type, listener,
).result()
def remove_listener():
"""Remove the listener."""
run_callback_threadsafe(
self._hass.loop, async_remove_listener).result()
return remove_listener
@callback
def async_listen_once(self, event_type, listener):
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns registered listener that can be used with remove_listener.
This method must be run in the event loop.
"""
@callback
def onetime_listener(event):
"""Remove listener from eventbus and then fire listener."""
if hasattr(onetime_listener, 'run'):
return
# Set variable so that we will never run twice.
# Because the event bus loop might have async_fire queued multiple
# times, it's possible this listener may already be lined up
# multiple times as well.
# This will make sure the second time it does nothing.
setattr(onetime_listener, 'run', True)
self._async_remove_listener(event_type, onetime_listener)
self._hass.async_run_job(listener, event)
return self.async_listen(event_type, onetime_listener)
@callback
def _async_remove_listener(self, event_type, listener):
"""Remove a listener of a specific event_type.
This method must be run in the event loop.
"""
try:
self._listeners[event_type].remove(listener)
# delete event_type list if empty
if not self._listeners[event_type]:
self._listeners.pop(event_type)
except (KeyError, ValueError):
# KeyError if no listener list exists for event_type
# ValueError if listener did not exist within event_type
_LOGGER.warning("Unable to remove unknown listener %s", listener)
class State(object):
"""Object to represent a state within the state machine.
entity_id: the entity that is represented.
state: the state of the entity
attributes: extra information on entity and state
last_changed: last time the state was changed, not the attributes.
last_updated: last time this object was updated.
"""
__slots__ = ['entity_id', 'state', 'attributes',
'last_changed', 'last_updated']
def __init__(self, entity_id, state, attributes=None, last_changed=None,
last_updated=None):
"""Initialize a new state."""
state = str(state)
if not valid_entity_id(entity_id):
raise InvalidEntityFormatError((
"Invalid entity id encountered: {}. "
"Format should be <domain>.<object_id>").format(entity_id))
if not valid_state(state):
raise InvalidStateError((
"Invalid state encountered for entity id: {}. "
"State max length is 255 characters.").format(entity_id))
self.entity_id = entity_id.lower()
self.state = state
self.attributes = MappingProxyType(attributes or {})
self.last_updated = last_updated or dt_util.utcnow()
self.last_changed = last_changed or self.last_updated
@property
def domain(self):
"""Domain of this state."""
return split_entity_id(self.entity_id)[0]
@property
def object_id(self):
"""Object id of this state."""
return split_entity_id(self.entity_id)[1]
@property
def name(self):
"""Name of this state."""
return (
self.attributes.get(ATTR_FRIENDLY_NAME) or
self.object_id.replace('_', ' '))
def as_dict(self):
"""Return a dict representation of the State.
Async friendly.
To be used for JSON serialization.
Ensures: state == State.from_dict(state.as_dict())
"""
return {'entity_id': self.entity_id,
'state': self.state,
'attributes': dict(self.attributes),
'last_changed': self.last_changed,
'last_updated': self.last_updated}
@classmethod
def from_dict(cls, json_dict):
"""Initialize a state from a dict.
Async friendly.
Ensures: state == State.from_dict(state.as_dict())
"""
if not (json_dict and 'entity_id' in json_dict and
'state' in json_dict):
return None
last_changed = json_dict.get('last_changed')
if isinstance(last_changed, str):
last_changed = dt_util.parse_datetime(last_changed)
last_updated = json_dict.get('last_updated')
if isinstance(last_updated, str):
last_updated = dt_util.parse_datetime(last_updated)
return cls(json_dict['entity_id'], json_dict['state'],
json_dict.get('attributes'), last_changed, last_updated)
def __eq__(self, other):
"""Return the comparison of the state."""
return (self.__class__ == other.__class__ and
self.entity_id == other.entity_id and
self.state == other.state and
self.attributes == other.attributes)
def __repr__(self):
"""Return the representation of the states."""
attr = "; {}".format(util.repr_helper(self.attributes)) \
if self.attributes else ""
return "<state {}={}{} @ {}>".format(
self.entity_id, self.state, attr,
dt_util.as_local(self.last_changed).isoformat())
class StateMachine(object):
"""Helper class that tracks the state of different entities."""
def __init__(self, bus, loop):
"""Initialize state machine."""
self._states = {}
self._bus = bus
self._loop = loop
def entity_ids(self, domain_filter=None):
"""List of entity ids that are being tracked."""
future = run_callback_threadsafe(
self._loop, self.async_entity_ids, domain_filter
)
return future.result()
@callback
def async_entity_ids(self, domain_filter=None):
"""List of entity ids that are being tracked.
This method must be run in the event loop.
"""
if domain_filter is None:
return list(self._states.keys())
domain_filter = domain_filter.lower()
return [state.entity_id for state in self._states.values()
if state.domain == domain_filter]
def all(self):
"""Create a list of all states."""
return run_callback_threadsafe(self._loop, self.async_all).result()
@callback
def async_all(self):
"""Create a list of all states.
This method must be run in the event loop.
"""
return list(self._states.values())
def get(self, entity_id):
"""Retrieve state of entity_id or None if not found.
Async friendly.
"""
return self._states.get(entity_id.lower())
def is_state(self, entity_id, state):
"""Test if entity exists and is specified state.
Async friendly.
"""
state_obj = self.get(entity_id)
return state_obj is not None and state_obj.state == state
def remove(self, entity_id):
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
"""
return run_callback_threadsafe(
self._loop, self.async_remove, entity_id).result()
@callback
def async_remove(self, entity_id):
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
old_state = self._states.pop(entity_id, None)
if old_state is None:
return False
self._bus.async_fire(EVENT_STATE_CHANGED, {
'entity_id': entity_id,
'old_state': old_state,
'new_state': None,
})
return True
def set(self, entity_id, new_state, attributes=None, force_update=False):
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
"""
run_callback_threadsafe(
self._loop,
self.async_set, entity_id, new_state, attributes, force_update,
).result()
@callback
def async_set(self, entity_id, new_state, attributes=None,
force_update=False):
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
new_state = str(new_state)
attributes = attributes or {}
old_state = self._states.get(entity_id)
is_existing = old_state is not None
same_state = (is_existing and old_state.state == new_state and
not force_update)
same_attr = is_existing and old_state.attributes == attributes
if same_state and same_attr:
return
last_changed = old_state.last_changed if same_state else None
state = State(entity_id, new_state, attributes, last_changed)
self._states[entity_id] = state
self._bus.async_fire(EVENT_STATE_CHANGED, {
'entity_id': entity_id,
'old_state': old_state,
'new_state': state,
})
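# Illustrative usage (a sketch, not part of the original source), assuming
# `hass` is a HomeAssistant instance; "light.kitchen" is a made-up entity:
#
#     >>> hass.states.set("light.kitchen", "on", {"brightness": 180})
#     >>> hass.states.get("light.kitchen").state
#     'on'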
class Service(object):
"""Representation of a callable service."""
__slots__ = ['func', 'schema', 'is_callback', 'is_coroutinefunction']
def __init__(self, func, schema):
"""Initialize a service."""
self.func = func
self.schema = schema
self.is_callback = is_callback(func)
self.is_coroutinefunction = asyncio.iscoroutinefunction(func)
class ServiceCall(object):
"""Representation of a call to a service."""
__slots__ = ['domain', 'service', 'data', 'call_id']
def __init__(self, domain, service, data=None, call_id=None):
"""Initialize a service call."""
self.domain = domain.lower()
self.service = service.lower()
self.data = MappingProxyType(data or {})
self.call_id = call_id
def __repr__(self):
"""Return the representation of the service."""
if self.data:
return "<ServiceCall {}.{}: {}>".format(
self.domain, self.service, util.repr_helper(self.data))
return "<ServiceCall {}.{}>".format(self.domain, self.service)
class ServiceRegistry(object):
"""Offer the services over the eventbus."""
def __init__(self, hass):
"""Initialize a service registry."""
self._services = {}
self._hass = hass
self._async_unsub_call_event = None
def _gen_unique_id():
cur_id = 1
while True:
yield '{}-{}'.format(id(self), cur_id)
cur_id += 1
gen = _gen_unique_id()
self._generate_unique_id = lambda: next(gen)
@property
def services(self):
"""Return dictionary with per domain a list of available services."""
return run_callback_threadsafe(
self._hass.loop, self.async_services,
).result()
@callback
def async_services(self):
"""Return dictionary with per domain a list of available services.
This method must be run in the event loop.
"""
return {domain: self._services[domain].copy()
for domain in self._services}
def has_service(self, domain, service):
"""Test if specified service exists.
Async friendly.
"""
return service.lower() in self._services.get(domain.lower(), [])
def register(self, domain, service, service_func, schema=None):
"""
Register a service.
Schema is called to coerce and validate the service data.
"""
run_callback_threadsafe(
self._hass.loop,
self.async_register, domain, service, service_func, schema
).result()
@callback
def async_register(self, domain, service, service_func, schema=None):
"""
Register a service.
Schema is called to coerce and validate the service data.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
service_obj = Service(service_func, schema)
if domain in self._services:
self._services[domain][service] = service_obj
else:
self._services[domain] = {service: service_obj}
if self._async_unsub_call_event is None:
self._async_unsub_call_event = self._hass.bus.async_listen(
EVENT_CALL_SERVICE, self._event_to_service_call)
self._hass.bus.async_fire(
EVENT_SERVICE_REGISTERED,
{ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
def remove(self, domain, service):
"""Remove a registered service from service handler."""
run_callback_threadsafe(
self._hass.loop, self.async_remove, domain, service).result()
@callback
def async_remove(self, domain, service):
"""Remove a registered service from service handler.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
if service not in self._services.get(domain, {}):
_LOGGER.warning(
"Unable to remove unknown service %s/%s.", domain, service)
return
self._services[domain].pop(service)
self._hass.bus.async_fire(
EVENT_SERVICE_REMOVED,
{ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
def call(self, domain, service, service_data=None, blocking=False):
"""
Call a service.
Specify blocking=True to wait till service is executed.
Waits a maximum of SERVICE_CALL_LIMIT.
If blocking = True, will return boolean if service executed
successfully within SERVICE_CALL_LIMIT.
This method will fire an event to call the service.
This event will be picked up by this ServiceRegistry and any
other ServiceRegistry that is listening on the EventBus.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
"""
return run_coroutine_threadsafe(
self.async_call(domain, service, service_data, blocking),
self._hass.loop
).result()
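# Illustrative usage (a sketch, not part of the original source), assuming
# `hass` is a HomeAssistant instance and light.turn_on is a registered service:
#
#     >>> hass.services.call("light", "turn_on",
#     ...                    {"entity_id": "light.kitchen"}, blocking=True)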
async def async_call(self, domain, service, service_data=None,
blocking=False):
"""
Call a service.
Specify blocking=True to wait till service is executed.
Waits a maximum of SERVICE_CALL_LIMIT.
If blocking = True, will return boolean if service executed
successfully within SERVICE_CALL_LIMIT.
This method will fire an event to call the service.
This event will be picked up by this ServiceRegistry and any
other ServiceRegistry that is listening on the EventBus.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
This method is a coroutine.
"""
call_id = self._generate_unique_id()
event_data = {
ATTR_DOMAIN: domain.lower(),
ATTR_SERVICE: service.lower(),
ATTR_SERVICE_DATA: service_data,
ATTR_SERVICE_CALL_ID: call_id,
}
if blocking:
fut = asyncio.Future(loop=self._hass.loop)
@callback
def service_executed(event):
"""Handle an executed service."""
if event.data[ATTR_SERVICE_CALL_ID] == call_id:
fut.set_result(True)
unsub = self._hass.bus.async_listen(
EVENT_SERVICE_EXECUTED, service_executed)
self._hass.bus.async_fire(EVENT_CALL_SERVICE, event_data)
if blocking:
done, _ = await asyncio.wait(
[fut], loop=self._hass.loop, timeout=SERVICE_CALL_LIMIT)
success = bool(done)
unsub()
return success
async def _event_to_service_call(self, event):
"""Handle the SERVICE_CALLED events from the EventBus."""
service_data = event.data.get(ATTR_SERVICE_DATA) or {}
domain = event.data.get(ATTR_DOMAIN).lower()
service = event.data.get(ATTR_SERVICE).lower()
call_id = event.data.get(ATTR_SERVICE_CALL_ID)
if not self.has_service(domain, service):
if event.origin == EventOrigin.local:
_LOGGER.warning("Unable to find service %s/%s",
domain, service)
return
service_handler = self._services[domain][service]
def fire_service_executed():
"""Fire service executed event."""
if not call_id:
return
data = {ATTR_SERVICE_CALL_ID: call_id}
if (service_handler.is_coroutinefunction or
service_handler.is_callback):
self._hass.bus.async_fire(EVENT_SERVICE_EXECUTED, data)
else:
self._hass.bus.fire(EVENT_SERVICE_EXECUTED, data)
try:
if service_handler.schema:
service_data = service_handler.schema(service_data)
except vol.Invalid as ex:
_LOGGER.error("Invalid service data for %s.%s: %s",
domain, service, humanize_error(service_data, ex))
fire_service_executed()
return
service_call = ServiceCall(domain, service, service_data, call_id)
try:
if service_handler.is_callback:
service_handler.func(service_call)
fire_service_executed()
elif service_handler.is_coroutinefunction:
await service_handler.func(service_call)
fire_service_executed()
else:
def execute_service():
"""Execute a service and fires a SERVICE_EXECUTED event."""
service_handler.func(service_call)
fire_service_executed()
await self._hass.async_add_job(execute_service)
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error executing service %s', service_call)
class Config(object):
"""Configuration settings for Home Assistant."""
def __init__(self):
"""Initialize a new config object."""
self.latitude = None # type: Optional[float]
self.longitude = None # type: Optional[float]
self.elevation = None # type: Optional[int]
self.location_name = None # type: Optional[str]
self.time_zone = None # type: Optional[str]
self.units = METRIC_SYSTEM # type: UnitSystem
# If True, pip install is skipped for requirements on startup
self.skip_pip = False # type: bool
# List of loaded components
self.components = set()
# Remote.API object pointing at local API
self.api = None
# Directory that holds the configuration
self.config_dir = None
# List of allowed external dirs to access
self.whitelist_external_dirs = set()
def distance(self: object, lat: float, lon: float) -> float:
"""Calculate distance from Home Assistant.
Async friendly.
"""
return self.units.length(
location.distance(self.latitude, self.longitude, lat, lon), 'm')
def path(self, *path):
"""Generate path to the file within the configuration directory.
Async friendly.
"""
if self.config_dir is None:
raise HomeAssistantError("config_dir is not set")
return os.path.join(self.config_dir, *path)
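# Illustrative usage (a sketch, not part of the original source), assuming
# config_dir has been set to "/home/user/.homeassistant":
#
#     >>> hass.config.path("automations.yaml")
#     '/home/user/.homeassistant/automations.yaml'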
def is_allowed_path(self, path: str) -> bool:
"""Check if the path is valid for access from outside."""
assert path is not None
parent = pathlib.Path(path)
try:
parent = parent.resolve() # pylint: disable=no-member
except (FileNotFoundError, RuntimeError, PermissionError):
return False
for whitelisted_path in self.whitelist_external_dirs:
try:
parent.relative_to(whitelisted_path)
return True
except ValueError:
pass
return False
def as_dict(self):
"""Create a dictionary representation of this dict.
Async friendly.
"""
time_zone = self.time_zone or dt_util.UTC
return {
'latitude': self.latitude,
'longitude': self.longitude,
'elevation': self.elevation,
'unit_system': self.units.as_dict(),
'location_name': self.location_name,
'time_zone': time_zone.zone,
'components': self.components,
'config_dir': self.config_dir,
'whitelist_external_dirs': self.whitelist_external_dirs,
'version': __version__
}
def _async_create_timer(hass):
"""Create a timer that will start on HOMEASSISTANT_START."""
handle = None
@callback
def fire_time_event(nxt):
"""Fire next time event."""
nonlocal handle
hass.bus.async_fire(EVENT_TIME_CHANGED,
{ATTR_NOW: dt_util.utcnow()})
nxt += 1  # schedule the next tick one second after the previous target time
slp_seconds = nxt - monotonic()
if slp_seconds < 0:
_LOGGER.error('Timer got out of sync. Resetting')
nxt = monotonic() + 1
slp_seconds = 1
handle = hass.loop.call_later(slp_seconds, fire_time_event, nxt)
@callback
def stop_timer(event):
"""Stop the timer."""
if handle is not None:
handle.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)
_LOGGER.info("Timer:starting")
fire_time_event(monotonic())
| 33.159612
| 79
| 0.618913
|
0c9e64104536d339140dcc74277977b26b3bedcf
| 2,298
|
py
|
Python
|
mex/benchmark.py
|
coblo/mex
|
d000a5e648d75d752abc3d6f5d099b9377a33c48
|
[
"BSD-3-Clause"
] | 5
|
2018-08-06T07:09:51.000Z
|
2020-02-22T05:21:10.000Z
|
mex/benchmark.py
|
coblo/mex
|
d000a5e648d75d752abc3d6f5d099b9377a33c48
|
[
"BSD-3-Clause"
] | 15
|
2018-03-23T15:40:05.000Z
|
2021-06-10T17:27:00.000Z
|
mex/benchmark.py
|
coblo/mex
|
d000a5e648d75d752abc3d6f5d099b9377a33c48
|
[
"BSD-3-Clause"
] | 4
|
2019-06-13T21:43:09.000Z
|
2021-04-03T05:09:11.000Z
|
# -*- coding: utf-8 -*-
"""
Benchmarks to get a feeling for the bottlenecks.
Testnet rpc scan results at node height 59354:
RPC listblocks: 7.382926345811435
RPC listblocks verbose: 27.969617406454475
RPC full getblock scan verbose=1: 135.90371246640606
RPC full getblock scan verbose=4: 180.16512355423876
RPC full getrawtransaction scan verbose=4: 369.658727571638
Insight:
`getblock` with verbose=4 includes transaction details. We can avoid api
call overhead by not calling `getrawtransaction` for each individual tx.
"""
import logging
import timeit
from mex.rpc import get_client
log = logging.getLogger("mex.benchmark")
def benchmark_rpc():
api = get_client()
node_height = api.getblockcount()["result"]
log.info(f"Node height: {node_height}")
log.info("Starting benchmark. Please be patient!")
start = timeit.default_timer()
blocks = api.listblocks("-" + str(node_height), verbose=False)["result"]
stop = timeit.default_timer()
runtime = stop - start
log.info(f"RPC listblocks: {runtime}")
start = timeit.default_timer()
blocks = api.listblocks("-" + str(node_height), verbose=True)["result"]
stop = timeit.default_timer()
runtime = stop - start
log.info(f"RPC listblocks verbose: {runtime}")
block_hashes = [item["hash"] for item in blocks]
tx_hashes = []
start = timeit.default_timer()
for block_hash in block_hashes:
data = api.getblock(block_hash, verbose=1)["result"]["tx"]
tx_hashes.extend(data) # pre-collect for getrawtransactions
stop = timeit.default_timer()
runtime = stop - start
log.info(f"RPC full getblock scan verbose=1: {runtime}")
start = timeit.default_timer()
for block_hash in block_hashes:
data = api.getblock(block_hash, verbose=4)
stop = timeit.default_timer()
runtime = stop - start
log.info(f"RPC full getblock scan verbose=4: {runtime}")
start = timeit.default_timer()
for tx_hash in tx_hashes:
data = api.getrawtransaction(tx_hash, verbose=1)["result"]
stop = timeit.default_timer()
runtime = stop - start
log.info(f"RPC full getrawtransaction scan verbose=4: {runtime}")
if __name__ == "__main__":
from mex.tools import init_logging, batchwise
init_logging()
benchmark_rpc()
| 30.64
| 76
| 0.702785
|
2c6b59357665f5ea962921e6065e5731ac290c55
| 10,977
|
py
|
Python
|
alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/network/plugins/custom_wf_plugin/plugin/workflows.py
|
alien4cloud/alien4cloud-cloudify4-provider
|
97faee855255eb0c3ce25bb3075c29acd11a63c5
|
[
"Apache-2.0"
] | null | null | null |
alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/network/plugins/custom_wf_plugin/plugin/workflows.py
|
alien4cloud/alien4cloud-cloudify4-provider
|
97faee855255eb0c3ce25bb3075c29acd11a63c5
|
[
"Apache-2.0"
] | 3
|
2015-12-04T15:27:22.000Z
|
2016-04-08T11:32:43.000Z
|
alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/network/plugins/custom_wf_plugin/plugin/workflows.py
|
alien4cloud/alien4cloud-cloudify4-provider
|
97faee855255eb0c3ce25bb3075c29acd11a63c5
|
[
"Apache-2.0"
] | 16
|
2015-01-29T10:05:09.000Z
|
2019-06-24T19:23:54.000Z
|
from cloudify.decorators import workflow
from cloudify.workflows import ctx
from cloudify.workflows import tasks as workflow_tasks
from utils import set_state_task
from utils import operation_task
from utils import link_tasks
from utils import CustomContext
from utils import generate_native_node_workflows
from utils import _get_all_nodes
from utils import _get_all_nodes_instances
from utils import _get_all_modified_node_instances
from utils import is_host_node
from workflow import WfStartEvent
from workflow import build_pre_event
# subworkflow 'install' for host 'Compute'
def install_host_compute(ctx, graph, custom_context):
custom_context.register_native_delegate_wf_step('Compute', 'Compute_install')
generate_native_node_workflows(ctx, graph, custom_context, 'install')
# subworkflow 'uninstall' for host 'Compute'
def uninstall_host_compute(ctx, graph, custom_context):
custom_context.register_native_delegate_wf_step('Compute', 'Compute_uninstall')
generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
def install_host(ctx, graph, custom_context, compute):
options = {}
options['Compute'] = install_host_compute
options[compute](ctx, graph, custom_context)
def uninstall_host(ctx, graph, custom_context, compute):
options = {}
options['Compute'] = uninstall_host_compute
options[compute](ctx, graph, custom_context)
@workflow
def a4c_install(**kwargs):
graph = ctx.graph_mode()
nodes = _get_all_nodes(ctx)
instances = _get_all_nodes_instances(ctx)
custom_context = CustomContext(ctx, instances, nodes)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started', message=build_pre_event(WfStartEvent('install')))
_a4c_install(ctx, graph, custom_context)
return graph.execute()
@workflow
def a4c_uninstall(**kwargs):
graph = ctx.graph_mode()
nodes = _get_all_nodes(ctx)
instances = _get_all_nodes_instances(ctx)
custom_context = CustomContext(ctx, instances, nodes)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started', message=build_pre_event(WfStartEvent('uninstall')))
_a4c_uninstall(ctx, graph, custom_context)
return graph.execute()
def _a4c_install(ctx, graph, custom_context):
# following code can be pasted in src/test/python/workflows/tasks.py for simulation
custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_install')
custom_context.register_native_delegate_wf_step('Compute', 'Compute_install')
custom_context.register_native_delegate_wf_step('InternalNetwork', 'InternalNetwork_install')
generate_native_node_workflows(ctx, graph, custom_context, 'install')
def _a4c_uninstall(ctx, graph, custom_context):
# following code can be pasted in src/test/python/workflows/tasks.py for simulation
custom_context.register_native_delegate_wf_step('Compute', 'Compute_uninstall')
custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_uninstall')
custom_context.register_native_delegate_wf_step('InternalNetwork', 'InternalNetwork_uninstall')
generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
def _get_scaling_group_name_from_node_id(ctx, node_id):
scaling_groups=ctx.deployment.scaling_groups
for group_name, scaling_group in ctx.deployment.scaling_groups.iteritems():
for member in scaling_group['members']:
if member == node_id:
ctx.logger.info("Node {} found in scaling group {}".format(node_id, group_name))
return group_name
return None
@workflow
def a4c_scale(ctx, node_id, delta, scale_compute, **kwargs):
delta = int(delta)
scalable_entity_name = _get_scaling_group_name_from_node_id(ctx, node_id)
scaling_group = ctx.deployment.scaling_groups.get(scalable_entity_name)
if scalable_entity_name:
curr_num_instances = scaling_group['properties']['current_instances']
planned_num_instances = curr_num_instances + delta
scale_id = scalable_entity_name
else:
scaled_node = ctx.get_node(scalable_entity_name)
if not scaled_node:
raise ValueError("Node {0} doesn't exist".format(scalable_entity_name))
if not is_host_node(scaled_node):
raise ValueError("Node {0} is not a host. This workflow can only scale hosts".format(scalable_entity_name))
if delta == 0:
ctx.logger.info('delta parameter is 0, so no scaling will take place.')
return
curr_num_instances = scaled_node.number_of_instances
planned_num_instances = curr_num_instances + delta
scale_id = scaled_node.id
if planned_num_instances < 1:
raise ValueError('Provided delta: {0} is illegal. Current number of '
'instances of node/group {1} is {2}'
.format(delta, scalable_entity_name, curr_num_instances))
modification = ctx.deployment.start_modification({
scale_id: {
'instances': planned_num_instances
}
})
ctx.logger.info('Deployment modification started. [modification_id={0} : {1}]'.format(modification.id, dir(modification)))
try:
if delta > 0:
ctx.logger.info('Scaling host/group {0} adding {1} instances'.format(scalable_entity_name, delta))
added_and_related = _get_all_nodes(modification.added)
added = _get_all_modified_node_instances(added_and_related, 'added')
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('scale', 'install')))
custom_context = CustomContext(ctx, added, added_and_related)
install_host(ctx, graph, custom_context, node_id)
try:
graph.execute()
except:
ctx.logger.error('Scale failed. Uninstalling node/group {0}'.format(scalable_entity_name))
graph = ctx.internal.task_graph
for task in graph.tasks_iter():
graph.remove_task(task)
try:
custom_context = CustomContext(ctx, added, added_and_related)
uninstall_host(ctx, graph, custom_context, scalable_entity_name)
graph.execute()
except:
ctx.logger.error('Node {0} uninstallation following scale failure has failed'.format(scalable_entity_name))
raise
else:
ctx.logger.info('Unscaling host/group {0} removing {1} instances'.format(scalable_entity_name, delta))
removed_and_related = _get_all_nodes(modification.removed)
removed = _get_all_modified_node_instances(removed_and_related, 'removed')
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('scale', 'uninstall')))
custom_context = CustomContext(ctx, removed, removed_and_related)
uninstall_host(ctx, graph, custom_context, node_id)
try:
graph.execute()
except:
ctx.logger.error('Unscale failed.')
raise
except:
ctx.logger.warn('Rolling back deployment modification. [modification_id={0}]'.format(modification.id))
try:
modification.rollback()
except:
ctx.logger.warn('Deployment modification rollback failed. The '
'deployment model is most likely in some corrupted'
' state.'
'[modification_id={0}]'.format(modification.id))
raise
raise
else:
try:
modification.finish()
except:
ctx.logger.warn('Deployment modification finish failed. The '
'deployment model is most likely in some corrupted'
' state.'
'[modification_id={0}]'.format(modification.id))
raise
@workflow
def a4c_heal(
ctx,
node_instance_id,
diagnose_value='Not provided',
**kwargs):
"""Reinstalls the whole subgraph of the system topology
The subgraph consists of all the nodes that are hosted in the
failing node's compute and the compute itself.
Additionally it unlinks and establishes the appropriate relationships.
:param ctx: cloudify context
:param node_instance_id: failing node instance's id
:param diagnose_value: diagnosed reason of failure
"""
ctx.logger.info("Starting 'heal' workflow on {0}, Diagnosis: {1}"
.format(node_instance_id, diagnose_value))
failing_node = ctx.get_node_instance(node_instance_id)
host_instance_id = failing_node._node_instance.host_id
failing_node_host = ctx.get_node_instance(host_instance_id)
node_id = failing_node_host.node_id
subgraph_node_instances = failing_node_host.get_contained_subgraph()
added_and_related = _get_all_nodes(ctx)
try:
graph = ctx.graph_mode()
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('heal', 'uninstall')))
custom_context = CustomContext(ctx, subgraph_node_instances, added_and_related)
uninstall_host(ctx, graph, custom_context, node_id)
graph.execute()
except:
ctx.logger.error('Uninstall while healing failed.')
graph = ctx.internal.task_graph
for task in graph.tasks_iter():
graph.remove_task(task)
ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
message=build_pre_event(WfStartEvent('heal', 'install')))
custom_context = CustomContext(ctx, subgraph_node_instances, added_and_related)
install_host(ctx, graph, custom_context, node_id)
graph.execute()
#following code can be pasted in src/test/python/workflows/context.py for simulation
#def _build_nodes(ctx):
#types = []
#types.append('alien.nodes.openstack.PrivateNetwork')
#types.append('alien.nodes.PrivateNetwork')
#types.append('tosca.nodes.Network')
#types.append('tosca.nodes.Root')
#node_InternalNetwork = _build_node(ctx, 'InternalNetwork', types, 1)
#types = []
#types.append('alien.nodes.openstack.Compute')
#types.append('tosca.nodes.Compute')
#types.append('tosca.nodes.Root')
#node_Compute = _build_node(ctx, 'Compute', types, 1)
#types = []
#types.append('alien.nodes.openstack.PublicNetwork')
#types.append('alien.nodes.PublicNetwork')
#types.append('tosca.nodes.Network')
#types.append('tosca.nodes.Root')
#node_NetPub = _build_node(ctx, 'NetPub', types, 1)
#_add_relationship(node_Compute, node_NetPub)
#_add_relationship(node_Compute, node_InternalNetwork)
| 45.17284
| 127
| 0.69099
|
9944c05aefebc33a7c9ae5921b67e517e76845bc
| 544
|
py
|
Python
|
LeetCode/0014-longest-common-prefix.py
|
tushar-1728/Coding
|
2df9da02cf3e5d4af5b47faf02a07ba54b3297cb
|
[
"MIT"
] | null | null | null |
LeetCode/0014-longest-common-prefix.py
|
tushar-1728/Coding
|
2df9da02cf3e5d4af5b47faf02a07ba54b3297cb
|
[
"MIT"
] | null | null | null |
LeetCode/0014-longest-common-prefix.py
|
tushar-1728/Coding
|
2df9da02cf3e5d4af5b47faf02a07ba54b3297cb
|
[
"MIT"
] | null | null | null |
class Solution:
    def longestCommonPrefix(self, strs):
        # Empty input has no common prefix.
        if not strs:
            return ""
        # Sort by length so the shortest string becomes the initial candidate.
        strs.sort(key=len)
        size = len(strs)
        temp = strs[0]
        i = 1
        j = len(temp)
        # Shrink the candidate from the right until every string starts with it.
        while i < size and j >= 0:
            if strs[i].startswith(temp):
                i += 1
            else:
                j -= 1
                temp = temp[:j]
        return temp
b = Solution()
print(b.longestCommonPrefix(["dog", "racecar", "car"]))
| 25.904762
| 53
| 0.398897
|
26235b152907696952946087c1ae1097bbc82b22
| 17,555
|
py
|
Python
|
virtualenv/Lib/site-packages/pyrr/matrix44.py
|
HotShot0901/PI
|
7e6fd0f68b4222e09ea825f27709ec5b1e51e928
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | 1
|
2020-04-15T02:43:16.000Z
|
2020-04-15T02:43:16.000Z
|
virtualenv/Lib/site-packages/pyrr/matrix44.py
|
HotShot0901/PI
|
7e6fd0f68b4222e09ea825f27709ec5b1e51e928
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | null | null | null |
virtualenv/Lib/site-packages/pyrr/matrix44.py
|
HotShot0901/PI
|
7e6fd0f68b4222e09ea825f27709ec5b1e51e928
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | 1
|
2018-06-07T22:31:11.000Z
|
2018-06-07T22:31:11.000Z
|
# -*- coding: utf-8 -*-
"""4x4 Matrix which supports rotation, translation, scale and skew.
Matrices are laid out in row-major format and can be loaded directly
into OpenGL.
To convert to column-major format, transpose the array using the
numpy.array.T method.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from . import matrix33
from . import vector
from . import vector3
from . import quaternion
from .utils import all_parameters_as_numpy_arrays, parameters_as_numpy_arrays
def create_identity(dtype=None):
"""Creates a new matrix44 and sets it to
an identity matrix.
:rtype: numpy.array
:return: A matrix representing an identity matrix with shape (4,4).
"""
return np.identity(4, dtype=dtype)
def create_from_matrix33(mat, dtype=None):
"""Creates a Matrix44 from a Matrix33.
The translation will be 0,0,0.
:rtype: numpy.array
:return: A matrix with shape (4,4) with the input matrix rotation.
"""
mat4 = np.identity(4, dtype=dtype)
mat4[0:3, 0:3] = mat
return mat4
def create_matrix33_view(mat):
"""Returns a view into the matrix in Matrix33 format.
This is different from matrix33.create_from_matrix44, in that
changes to the returned matrix will also alter the original matrix.
:rtype: numpy.array
:return: A view into the matrix in the format of a matrix33 (shape (3,3)).
"""
return mat[0:3, 0:3]
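# Illustrative usage (a sketch, not part of the original module): the returned
# array is a view, so writing through it mutates the parent matrix:
#
#     >>> m = create_identity()
#     >>> view = create_matrix33_view(m)
#     >>> view[0, 0] = 5.0
#     >>> m[0, 0]
#     5.0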
@parameters_as_numpy_arrays('eulers')
def create_from_eulers(eulers, dtype=None):
"""Creates a matrix from the specified Euler rotations.
:param numpy.array eulers: A set of euler rotations in the format
specified by the euler modules.
:rtype: numpy.array
:return: A matrix with shape (4,4) with the euler's rotation.
"""
dtype = dtype or eulers.dtype
# set to identity matrix
# this will populate our extra rows for us
mat = create_identity(dtype)
# we'll use Matrix33 for our conversion
mat[0:3, 0:3] = matrix33.create_from_eulers(eulers, dtype)
return mat
@parameters_as_numpy_arrays('axis')
def create_from_axis_rotation(axis, theta, dtype=None):
"""Creates a matrix from the specified rotation theta around an axis.
:param numpy.array axis: A (3,) vector.
:param float theta: A rotation in radians.
:rtype: numpy.array
:return: A matrix with shape (4,4).
"""
dtype = dtype or axis.dtype
# set to identity matrix
# this will populate our extra rows for us
mat = create_identity(dtype)
# we'll use Matrix33 for our conversion
mat[0:3, 0:3] = matrix33.create_from_axis_rotation(axis, theta, dtype)
return mat
@parameters_as_numpy_arrays('quat')
def create_from_quaternion(quat, dtype=None):
"""Creates a matrix with the same rotation as a quaternion.
:param quat: The quaternion to create the matrix from.
:rtype: numpy.array
:return: A matrix with shape (4,4) with the quaternion's rotation.
"""
dtype = dtype or quat.dtype
# set to identity matrix
# this will populate our extra rows for us
mat = create_identity(dtype)
# we'll use Matrix33 for our conversion
mat[0:3, 0:3] = matrix33.create_from_quaternion(quat, dtype)
return mat
@parameters_as_numpy_arrays('quat')
def create_from_inverse_of_quaternion(quat, dtype=None):
"""Creates a matrix with the inverse rotation of a quaternion.
    This can be used to go from object space to inertial space.
    :param numpy.array quat: The quaternion to make the matrix from (shape 4).
    :rtype: numpy.array
    :return: A matrix with shape (4,4) that represents the inverse of
the quaternion.
"""
dtype = dtype or quat.dtype
# set to identity matrix
# this will populate our extra rows for us
mat = create_identity(dtype)
# we'll use Matrix33 for our conversion
mat[0:3, 0:3] = matrix33.create_from_inverse_of_quaternion(quat, dtype)
return mat
@parameters_as_numpy_arrays('vec')
def create_from_translation(vec, dtype=None):
"""Creates an identity matrix with the translation set.
:param numpy.array vec: The translation vector (shape 3 or 4).
:rtype: numpy.array
:return: A matrix with shape (4,4) that represents a matrix
with the translation set to the specified vector.
"""
dtype = dtype or vec.dtype
mat = create_identity(dtype)
mat[3, 0:3] = vec[:3]
return mat
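# A minimal usage sketch (illustrative values, not from the original module): because
# these matrices are row-major, the translation lands in the last *row*, and vectors
# are multiplied on the left (see apply_to_vector below).
#
#   >>> m = create_from_translation([1., 2., 3.])
#   >>> m[3, :3]   # -> [1., 2., 3.]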
def create_from_scale(scale, dtype=None):
"""Creates an identity matrix with the scale set.
:param numpy.array scale: The scale to apply as a vector (shape 3).
:rtype: numpy.array
:return: A matrix with shape (4,4) with the scale
set to the specified vector.
"""
    # we need to expand 'scale' into its components
# because numpy isn't flattening them properly.
m = np.diagflat([scale[0], scale[1], scale[2], 1.0])
if dtype:
m = m.astype(dtype)
return m
def create_from_x_rotation(theta, dtype=None):
"""Creates a matrix with the specified rotation about the X axis.
:param float theta: The rotation, in radians, about the X-axis.
:rtype: numpy.array
:return: A matrix with the shape (4,4) with the specified rotation about
the X-axis.
.. seealso:: http://en.wikipedia.org/wiki/Rotation_matrix#In_three_dimensions
"""
mat = create_identity(dtype)
mat[0:3, 0:3] = matrix33.create_from_x_rotation(theta, dtype)
return mat
def create_from_y_rotation(theta, dtype=None):
"""Creates a matrix with the specified rotation about the Y axis.
:param float theta: The rotation, in radians, about the Y-axis.
:rtype: numpy.array
:return: A matrix with the shape (4,4) with the specified rotation about
the Y-axis.
.. seealso:: http://en.wikipedia.org/wiki/Rotation_matrix#In_three_dimensions
"""
mat = create_identity(dtype)
mat[0:3, 0:3] = matrix33.create_from_y_rotation(theta, dtype)
return mat
def create_from_z_rotation(theta, dtype=None):
"""Creates a matrix with the specified rotation about the Z axis.
:param float theta: The rotation, in radians, about the Z-axis.
:rtype: numpy.array
:return: A matrix with the shape (4,4) with the specified rotation about
the Z-axis.
.. seealso:: http://en.wikipedia.org/wiki/Rotation_matrix#In_three_dimensions
"""
mat = create_identity(dtype)
mat[0:3, 0:3] = matrix33.create_from_z_rotation(theta, dtype)
return mat
@all_parameters_as_numpy_arrays
def apply_to_vector(mat, vec):
"""Apply a matrix to a vector.
The matrix's rotation and translation are applied to the vector.
Supports multiple matrices and vectors.
:param numpy.array mat: The rotation / translation matrix.
Can be a list of matrices.
:param numpy.array vec: The vector to modify.
Can be a list of vectors.
:rtype: numpy.array
:return: The vectors rotated by the specified matrix.
"""
if vec.size == 3:
# convert to a vec4
vec4 = np.array([vec[0], vec[1], vec[2], 1.], dtype=vec.dtype)
vec4 = np.dot(vec4, mat)
if np.allclose(vec4[3], 0.):
vec4[:] = [np.inf, np.inf, np.inf, np.inf]
else:
vec4 /= vec4[3]
return vec4[:3]
elif vec.size == 4:
return np.dot(vec, mat)
else:
raise ValueError("Vector size unsupported")
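# A quick sketch of apply_to_vector with a pure translation matrix (illustrative
# values only, not from the original module):
#
#   >>> m = create_from_translation([10., 0., 0.])
#   >>> apply_to_vector(m, [1., 2., 3.])   # -> [11., 2., 3.]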
def multiply(m1, m2):
"""Multiply two matricies, m1 . m2.
This is essentially a wrapper around
numpy.dot(m1, m2)
:param numpy.array m1: The first matrix.
Can be a list of matrices.
:param numpy.array m2: The second matrix.
Can be a list of matrices.
:rtype: numpy.array
:return: A matrix that results from multiplying m1 by m2.
"""
return np.dot(m1, m2)
def create_perspective_projection(fovy, aspect, near, far, dtype=None):
"""Creates perspective projection matrix.
.. seealso:: http://www.opengl.org/sdk/docs/man2/xhtml/gluPerspective.xml
.. seealso:: http://www.geeks3d.com/20090729/howto-perspective-projection-matrix-in-opengl/
:param float fovy: field of view in y direction in degrees
:param float aspect: aspect ratio of the view (width / height)
:param float near: distance from the viewer to the near clipping plane (only positive)
:param float far: distance from the viewer to the far clipping plane (only positive)
:rtype: numpy.array
    :return: A projection matrix representing the specified perspective.
"""
ymax = near * np.tan(fovy * np.pi / 360.0)
xmax = ymax * aspect
return create_perspective_projection_from_bounds(-xmax, xmax, -ymax, ymax, near, far, dtype=dtype)
def create_perspective_projection_matrix(fovy, aspect, near, far, dtype=None): # TODO: mark as deprecated
    """Creates a perspective projection matrix.
.. seealso:: http://www.opengl.org/sdk/docs/man2/xhtml/gluPerspective.xml
.. seealso:: http://www.geeks3d.com/20090729/howto-perspective-projection-matrix-in-opengl/
:param float fovy: field of view in y direction in degrees
:param float aspect: aspect ratio of the view (width / height)
:param float near: distance from the viewer to the near clipping plane (only positive)
:param float far: distance from the viewer to the far clipping plane (only positive)
:rtype: numpy.array
    :return: A projection matrix representing the specified perspective.
"""
return create_perspective_projection(fovy, aspect, near, far, dtype)
def create_perspective_projection_from_bounds(
left,
right,
bottom,
top,
near,
far,
dtype=None
):
"""Creates a perspective projection matrix using the specified near
plane dimensions.
:param float left: The left of the near plane relative to the plane's centre.
:param float right: The right of the near plane relative to the plane's centre.
:param float top: The top of the near plane relative to the plane's centre.
:param float bottom: The bottom of the near plane relative to the plane's centre.
:param float near: The distance of the near plane from the camera's origin.
It is recommended that the near plane is set to 1.0 or above to avoid rendering issues
at close range.
:param float far: The distance of the far plane from the camera's origin.
:rtype: numpy.array
:return: A projection matrix representing the specified perspective.
.. seealso:: http://www.gamedev.net/topic/264248-building-a-projection-matrix-without-api/
.. seealso:: http://www.glprogramming.com/red/chapter03.html
"""
"""
E 0 A 0
0 F B 0
0 0 C D
0 0-1 0
A = (right+left)/(right-left)
B = (top+bottom)/(top-bottom)
C = -(far+near)/(far-near)
D = -2*far*near/(far-near)
E = 2*near/(right-left)
F = 2*near/(top-bottom)
"""
A = (right + left) / (right - left)
B = (top + bottom) / (top - bottom)
C = -(far + near) / (far - near)
D = -2. * far * near / (far - near)
E = 2. * near / (right - left)
F = 2. * near / (top - bottom)
return np.array((
( E, 0., 0., 0.),
( 0., F, 0., 0.),
( A, B, C,-1.),
( 0., 0., D, 0.),
), dtype=dtype)
def create_perspective_projection_matrix_from_bounds(
    left, right, bottom, top, near, far, dtype=None): # TODO: mark as deprecated
"""Creates a perspective projection matrix using the specified near
plane dimensions.
:param float left: The left of the near plane relative to the plane's centre.
:param float right: The right of the near plane relative to the plane's centre.
:param float top: The top of the near plane relative to the plane's centre.
:param float bottom: The bottom of the near plane relative to the plane's centre.
:param float near: The distance of the near plane from the camera's origin.
It is recommended that the near plane is set to 1.0 or above to avoid rendering issues
at close range.
:param float far: The distance of the far plane from the camera's origin.
:rtype: numpy.array
:return: A projection matrix representing the specified perspective.
.. seealso:: http://www.gamedev.net/topic/264248-building-a-projection-matrix-without-api/
.. seealso:: http://www.glprogramming.com/red/chapter03.html
"""
"""
E 0 A 0
0 F B 0
0 0 C D
0 0-1 0
A = (right+left)/(right-left)
B = (top+bottom)/(top-bottom)
C = -(far+near)/(far-near)
D = -2*far*near/(far-near)
E = 2*near/(right-left)
F = 2*near/(top-bottom)
"""
return create_perspective_projection_from_bounds(
left, right, bottom, top, near, far, dtype
)
def create_orthogonal_projection(
left,
right,
bottom,
top,
near,
far,
dtype=None
):
"""Creates an orthogonal projection matrix.
:param float left: The left of the near plane relative to the plane's centre.
:param float right: The right of the near plane relative to the plane's centre.
:param float top: The top of the near plane relative to the plane's centre.
:param float bottom: The bottom of the near plane relative to the plane's centre.
:param float near: The distance of the near plane from the camera's origin.
It is recommended that the near plane is set to 1.0 or above to avoid rendering issues
at close range.
:param float far: The distance of the far plane from the camera's origin.
:rtype: numpy.array
:return: A projection matrix representing the specified orthogonal perspective.
.. seealso:: http://msdn.microsoft.com/en-us/library/dd373965(v=vs.85).aspx
"""
"""
A 0 0 Tx
0 B 0 Ty
0 0 C Tz
0 0 0 1
A = 2 / (right - left)
B = 2 / (top - bottom)
C = -2 / (far - near)
Tx = (right + left) / (right - left)
Ty = (top + bottom) / (top - bottom)
Tz = (far + near) / (far - near)
"""
rml = right - left
tmb = top - bottom
fmn = far - near
A = 2. / rml
B = 2. / tmb
C = -2. / fmn
Tx = -(right + left) / rml
Ty = -(top + bottom) / tmb
Tz = -(far + near) / fmn
return np.array((
( A, 0., 0., 0.),
(0., B, 0., 0.),
(0., 0., C, 0.),
(Tx, Ty, Tz, 1.),
), dtype=dtype)
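# A minimal sketch (illustrative values): a pixel-space orthographic projection that
# maps x to [0, 800] and y to [0, 600] with near/far at -1/1; the diagonal then holds
# 2/width and 2/height.
#
#   >>> proj = create_orthogonal_projection(0., 800., 0., 600., -1., 1.)
#   >>> proj[0, 0], proj[1, 1]   # -> (2./800., 2./600.)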
def create_orthogonal_projection_matrix(
    left, right, bottom, top, near, far, dtype=None): # TODO: mark as deprecated
"""Creates an orthogonal projection matrix.
:param float left: The left of the near plane relative to the plane's centre.
:param float right: The right of the near plane relative to the plane's centre.
:param float top: The top of the near plane relative to the plane's centre.
:param float bottom: The bottom of the near plane relative to the plane's centre.
:param float near: The distance of the near plane from the camera's origin.
It is recommended that the near plane is set to 1.0 or above to avoid rendering issues
at close range.
:param float far: The distance of the far plane from the camera's origin.
:rtype: numpy.array
:return: A projection matrix representing the specified orthogonal perspective.
.. seealso:: http://msdn.microsoft.com/en-us/library/dd373965(v=vs.85).aspx
"""
"""
A 0 0 Tx
0 B 0 Ty
0 0 C Tz
0 0 0 1
A = 2 / (right - left)
B = 2 / (top - bottom)
C = -2 / (far - near)
Tx = (right + left) / (right - left)
Ty = (top + bottom) / (top - bottom)
Tz = (far + near) / (far - near)
"""
return create_orthogonal_projection(
left, right, bottom, top, near, far, dtype
)
def create_look_at(eye, target, up, dtype=None):
"""Creates a look at matrix according to OpenGL standards.
:param numpy.array eye: Position of the camera in world coordinates.
:param numpy.array target: The position in world coordinates that the
camera is looking at.
:param numpy.array up: The up vector of the camera.
:rtype: numpy.array
:return: A look at matrix that can be used as a viewMatrix
"""
eye = np.asarray(eye)
target = np.asarray(target)
up = np.asarray(up)
forward = vector.normalize(target - eye)
side = vector.normalize(np.cross(forward, up))
up = vector.normalize(np.cross(side, forward))
return np.array((
(side[0], up[0], -forward[0], 0.),
(side[1], up[1], -forward[1], 0.),
(side[2], up[2], -forward[2], 0.),
(-np.dot(side, eye), -np.dot(up, eye), np.dot(forward, eye), 1.0)
), dtype=dtype)
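# A minimal sketch (illustrative values): a camera at (0, 0, 5) looking at the origin
# with +Y up puts -dot(side, eye), -dot(up, eye), dot(forward, eye) in the last row.
#
#   >>> view = create_look_at([0., 0., 5.], [0., 0., 0.], [0., 1., 0.])
#   >>> view[3, :3]   # -> [0., 0., -5.]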
def inverse(m):
"""Returns the inverse of the matrix.
This is essentially a wrapper around numpy.linalg.inv.
:param numpy.array m: A matrix.
:rtype: numpy.array
:return: The inverse of the specified matrix.
.. seealso:: http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.inv.html
"""
return np.linalg.inv(m)
def decompose(m):
"""Decomposes an affine transformation matrix into its scale, rotation and
translation components.
:param numpy.array m: A matrix.
:return: tuple (scale, rotation, translation)
numpy.array scale vector3
numpy.array rotation quaternion
numpy.array translation vector3
"""
m = np.asarray(m)
scale = np.linalg.norm(m[:3, :3], axis=1)
det = np.linalg.det(m)
if det < 0:
scale[0] *= -1
position = m[3, :3]
rotation = m[:3, :3] * (1 / scale)[:, None]
return scale, quaternion.create_from_matrix(rotation), position
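# A round-trip sketch (illustrative values): compose a scale and a translation, then
# recover them with decompose(). The rotation comes back as an identity quaternion.
#
#   >>> m = multiply(create_from_scale([2., 2., 2.]), create_from_translation([1., 2., 3.]))
#   >>> scale, rot, trans = decompose(m)
#   >>> scale, trans   # -> ([2., 2., 2.], [1., 2., 3.])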
| 33.889961
| 108
| 0.66078
|
a500d12162f40cac6c4c3576030cd5f518b9a14e
| 238
|
py
|
Python
|
fastreid/modeling/heads/__init__.py
|
asvk/fast-reid
|
cf246e9bee5b5e5d154de98ba0395b7a5d0d0ab7
|
[
"Apache-2.0"
] | null | null | null |
fastreid/modeling/heads/__init__.py
|
asvk/fast-reid
|
cf246e9bee5b5e5d154de98ba0395b7a5d0d0ab7
|
[
"Apache-2.0"
] | null | null | null |
fastreid/modeling/heads/__init__.py
|
asvk/fast-reid
|
cf246e9bee5b5e5d154de98ba0395b7a5d0d0ab7
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
from .build import REID_HEADS_REGISTRY, build_reid_heads
# import all the meta_arch, so they will be registered
from .embedding_head import EmbeddingHead
| 21.636364
| 56
| 0.785714
|
dd05e2022f066666e40a6304b653b76faef676ae
| 10,256
|
py
|
Python
|
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/pandas/core/dtypes/inference.py
|
tanerqy/coffeegrindsize
|
57f6c48213afda2704478b3fc2d0749332ca9d0e
|
[
"MIT"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
Library/lib/python3.7/site-packages/pandas-0.24.2-py3.7-macosx-10.9-x86_64.egg/pandas/core/dtypes/inference.py
|
gengyong/Carnets
|
8930a14f69360d4db115a85ff9e0f6efa80fa2e7
|
[
"BSD-3-Clause"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
venv/Lib/site-packages/pandas/core/dtypes/inference.py
|
jaykang-heo/poseAnalysis
|
34cfac4a889e2c973651c1c07740ea0908542d68
|
[
"MIT"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
""" basic inference routines """
from numbers import Number
import re
import numpy as np
from pandas._libs import lib
from pandas.compat import (
PY2, Set, re_type, string_and_binary_types, string_types, text_type)
from pandas import compat
is_bool = lib.is_bool
is_integer = lib.is_integer
is_float = lib.is_float
is_complex = lib.is_complex
is_scalar = lib.is_scalar
is_decimal = lib.is_decimal
is_interval = lib.is_interval
def is_number(obj):
"""
Check if the object is a number.
    Returns True when the object is a number, and False if it is not.
Parameters
----------
obj : any type
The object to check if is a number.
Returns
-------
is_number : bool
Whether `obj` is a number or not.
See Also
--------
pandas.api.types.is_integer: Checks a subgroup of numbers.
Examples
--------
>>> pd.api.types.is_number(1)
True
>>> pd.api.types.is_number(7.15)
True
Booleans are valid because they are int subclass.
>>> pd.api.types.is_number(False)
True
>>> pd.api.types.is_number("foo")
False
>>> pd.api.types.is_number("5")
False
"""
return isinstance(obj, (Number, np.number))
def is_string_like(obj):
"""
Check if the object is a string.
Parameters
----------
obj : The object to check
Examples
--------
>>> is_string_like("foo")
True
>>> is_string_like(1)
False
Returns
-------
is_str_like : bool
Whether `obj` is a string or not.
"""
return isinstance(obj, (text_type, string_types))
def _iterable_not_string(obj):
"""
Check if the object is an iterable but not a string.
Parameters
----------
obj : The object to check.
Returns
-------
is_iter_not_string : bool
Whether `obj` is a non-string iterable.
Examples
--------
>>> _iterable_not_string([1, 2, 3])
True
>>> _iterable_not_string("foo")
False
>>> _iterable_not_string(1)
False
"""
return (isinstance(obj, compat.Iterable) and
not isinstance(obj, string_types))
def is_iterator(obj):
"""
Check if the object is an iterator.
For example, lists are considered iterators
but not strings or datetime objects.
Parameters
----------
obj : The object to check
Returns
-------
is_iter : bool
Whether `obj` is an iterator.
Examples
--------
>>> is_iterator([1, 2, 3])
True
>>> is_iterator(datetime(2017, 1, 1))
False
>>> is_iterator("foo")
False
>>> is_iterator(1)
False
"""
if not hasattr(obj, '__iter__'):
return False
if PY2:
return hasattr(obj, 'next')
else:
# Python 3 generators have
# __next__ instead of next
return hasattr(obj, '__next__')
def is_file_like(obj):
"""
Check if the object is a file-like object.
For objects to be considered file-like, they must
be an iterator AND have either a `read` and/or `write`
method as an attribute.
Note: file-like objects must be iterable, but
iterable objects need not be file-like.
.. versionadded:: 0.20.0
Parameters
----------
obj : The object to check
Returns
-------
is_file_like : bool
Whether `obj` has file-like properties.
Examples
--------
    >>> buffer = StringIO("data")
>>> is_file_like(buffer)
True
>>> is_file_like([1, 2, 3])
False
"""
if not (hasattr(obj, 'read') or hasattr(obj, 'write')):
return False
if not hasattr(obj, "__iter__"):
return False
return True
def is_re(obj):
"""
Check if the object is a regex pattern instance.
Parameters
----------
obj : The object to check
Returns
-------
is_regex : bool
Whether `obj` is a regex pattern.
Examples
--------
>>> is_re(re.compile(".*"))
True
>>> is_re("foo")
False
"""
return isinstance(obj, re_type)
def is_re_compilable(obj):
"""
Check if the object can be compiled into a regex pattern instance.
Parameters
----------
obj : The object to check
Returns
-------
is_regex_compilable : bool
Whether `obj` can be compiled as a regex pattern.
Examples
--------
>>> is_re_compilable(".*")
True
>>> is_re_compilable(1)
False
"""
try:
re.compile(obj)
except TypeError:
return False
else:
return True
def is_list_like(obj, allow_sets=True):
"""
Check if the object is list-like.
Objects that are considered list-like are for example Python
lists, tuples, sets, NumPy arrays, and Pandas Series.
Strings and datetime objects, however, are not considered list-like.
Parameters
----------
obj : The object to check
allow_sets : boolean, default True
If this parameter is False, sets will not be considered list-like
.. versionadded:: 0.24.0
Returns
-------
is_list_like : bool
Whether `obj` has list-like properties.
Examples
--------
>>> is_list_like([1, 2, 3])
True
>>> is_list_like({1, 2, 3})
True
>>> is_list_like(datetime(2017, 1, 1))
False
>>> is_list_like("foo")
False
>>> is_list_like(1)
False
>>> is_list_like(np.array([2]))
True
    >>> is_list_like(np.array(2))
False
"""
return (isinstance(obj, compat.Iterable)
# we do not count strings/unicode/bytes as list-like
and not isinstance(obj, string_and_binary_types)
# exclude zero-dimensional numpy arrays, effectively scalars
and not (isinstance(obj, np.ndarray) and obj.ndim == 0)
# exclude sets if allow_sets is False
and not (allow_sets is False and isinstance(obj, Set)))
def is_array_like(obj):
"""
Check if the object is array-like.
For an object to be considered array-like, it must be list-like and
have a `dtype` attribute.
Parameters
----------
obj : The object to check
Returns
-------
is_array_like : bool
Whether `obj` has array-like properties.
Examples
--------
>>> is_array_like(np.array([1, 2, 3]))
True
>>> is_array_like(pd.Series(["a", "b"]))
True
>>> is_array_like(pd.Index(["2016-01-01"]))
True
>>> is_array_like([1, 2, 3])
False
>>> is_array_like(("a", "b"))
False
"""
return is_list_like(obj) and hasattr(obj, "dtype")
def is_nested_list_like(obj):
"""
Check if the object is list-like, and that all of its elements
are also list-like.
.. versionadded:: 0.20.0
Parameters
----------
obj : The object to check
Returns
-------
is_list_like : bool
Whether `obj` has list-like properties.
Examples
--------
>>> is_nested_list_like([[1, 2, 3]])
True
>>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
True
>>> is_nested_list_like(["foo"])
False
>>> is_nested_list_like([])
False
>>> is_nested_list_like([[1, 2, 3], 1])
False
Notes
-----
    This won't reliably detect whether a consumable iterator (e.g.
a generator) is a nested-list-like without consuming the iterator.
To avoid consuming it, we always return False if the outer container
doesn't define `__len__`.
See Also
--------
is_list_like
"""
return (is_list_like(obj) and hasattr(obj, '__len__') and
len(obj) > 0 and all(is_list_like(item) for item in obj))
def is_dict_like(obj):
"""
Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
>>> is_dict_like(dict)
False
>>> is_dict_like(dict())
True
"""
dict_like_attrs = ("__getitem__", "keys", "__contains__")
return (all(hasattr(obj, attr) for attr in dict_like_attrs)
# [GH 25196] exclude classes
and not isinstance(obj, type))
def is_named_tuple(obj):
"""
Check if the object is a named tuple.
Parameters
----------
obj : The object to check
Returns
-------
is_named_tuple : bool
Whether `obj` is a named tuple.
Examples
--------
>>> Point = namedtuple("Point", ["x", "y"])
>>> p = Point(1, 2)
>>>
>>> is_named_tuple(p)
True
>>> is_named_tuple((1, 2))
False
"""
return isinstance(obj, tuple) and hasattr(obj, '_fields')
def is_hashable(obj):
"""Return True if hash(obj) will succeed, False otherwise.
Some types will pass a test against collections.Hashable but fail when they
are actually hashed with hash().
Distinguish between these and other types by trying the call to hash() and
seeing if they raise TypeError.
Examples
--------
>>> a = ([],)
>>> isinstance(a, collections.Hashable)
True
>>> is_hashable(a)
False
"""
# Unfortunately, we can't use isinstance(obj, collections.Hashable), which
# can be faster than calling hash. That is because numpy scalars on Python
# 3 fail this test.
# Reconsider this decision once this numpy bug is fixed:
# https://github.com/numpy/numpy/issues/5562
try:
hash(obj)
except TypeError:
return False
else:
return True
def is_sequence(obj):
"""
Check if the object is a sequence of objects.
String types are not included as sequences here.
Parameters
----------
obj : The object to check
Returns
-------
is_sequence : bool
Whether `obj` is a sequence of objects.
Examples
--------
>>> l = [1, 2, 3]
>>>
>>> is_sequence(l)
True
>>> is_sequence(iter(l))
False
"""
try:
iter(obj) # Can iterate over it.
len(obj) # Has a length associated with it.
return not isinstance(obj, string_and_binary_types)
except (TypeError, AttributeError):
return False
| 20.512
| 79
| 0.584048
|
62914e3218ac8f59773b390383f98ca583616438
| 2,891
|
py
|
Python
|
ros_atlas/model_processors/HandGestureProcessor.py
|
patrick-ubc/Huawei_HiFly_Drone
|
5dae1b56f49c2b86c3b852bbc5e3a63e84ccd490
|
[
"Apache-2.0"
] | 1
|
2021-09-21T23:23:59.000Z
|
2021-09-21T23:23:59.000Z
|
ros_atlas/model_processors/HandGestureProcessor.py
|
patrick-ubc/Huawei_HiFly_Drone
|
5dae1b56f49c2b86c3b852bbc5e3a63e84ccd490
|
[
"Apache-2.0"
] | 8
|
2021-07-05T21:41:53.000Z
|
2022-02-15T19:46:13.000Z
|
ros_atlas/model_processors/HandGestureProcessor.py
|
patrick-ubc/Huawei_HiFly_Drone
|
5dae1b56f49c2b86c3b852bbc5e3a63e84ccd490
|
[
"Apache-2.0"
] | 4
|
2021-06-11T22:47:04.000Z
|
2021-07-29T19:57:13.000Z
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import cv2
import numpy as np
import sys
from PIL import Image, ImageDraw, ImageFont
from model_processors.BaseProcessor import BaseProcessor
from atlas_utils.acl_dvpp import Dvpp
from atlas_utils.acl_image import AclImage
class ModelProcessor(BaseProcessor):
gesture_categories = [
'0',
'1',
'2',
'3',
'4',
'5',
'6',
'7',
'8',
'9',
'left',
'ok',
'right',
'rock',
'finger heart',
'praise',
'prayer',
'stop',
'Give the middle finger',
'bow',
'No gesture'
]
def __init__(self, params):
super().__init__(params)
self._dvpp = Dvpp(self._acl_resource)
if not os.path.exists("../data/gesture_yuv"):
os.mkdir("../data/gesture_yuv")
self._tmp_file = "../data/gesture_yuv/tmp.jpg"
"""Try with default ACLImage and DVPP implementation - then implement CV2 and image memory implement if time permits"""
def preprocess(self, image):
image_dvpp = image.copy_to_dvpp()
yuv_image = self._dvpp.jpegd(image_dvpp)
resized_image = self._dvpp.resize(yuv_image, self._model_width, self._model_height)
return resized_image
def postprocess(self, infer_output, origin_img):
data = infer_output[0]
vals = data.flatten()
top_k = vals.argsort()[-1:-2:-1]
if len(top_k):
object_class = self.get_gesture_categories(top_k[0])
origin_img = Image.fromarray(origin_img)
draw = ImageDraw.Draw(origin_img)
font = ImageFont.load_default()
draw.text((10, 50), object_class, font=font, fill=255)
return np.array(origin_img)
return np.array(origin_img)
def predict(self, frame):
cv2.imwrite(self._tmp_file, frame)
self._acl_image = AclImage(self._tmp_file)
resized_image = self.preprocess(self._acl_image)
infer_out = self.model.execute([resized_image,])
result = self.postprocess(infer_out, frame)
return result
def get_gesture_categories(self, gesture_id):
if gesture_id >= len(ModelProcessor.gesture_categories):
return "unknown"
else:
return ModelProcessor.gesture_categories[gesture_id]
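# The top-1 selection in postprocess() above reduces to the argsort slice below
# (standalone numpy sketch with made-up scores):
#
#   >>> np.array([0.05, 0.80, 0.15]).argsort()[-1:-2:-1]   # -> array([1]), the best class index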
| 30.755319
| 123
| 0.639225
|
9d0a53d7b60f2bd54ba2aa318443b9a52d52fb3d
| 12,247
|
py
|
Python
|
myuw/test/api/test_instructor_schedule.py
|
uw-it-aca/myuw
|
3fa1fabeb3c09d81a049f7c1a8c94092d612438a
|
[
"Apache-2.0"
] | 18
|
2015-02-04T01:09:11.000Z
|
2021-11-25T03:10:39.000Z
|
myuw/test/api/test_instructor_schedule.py
|
uw-it-aca/myuw
|
3fa1fabeb3c09d81a049f7c1a8c94092d612438a
|
[
"Apache-2.0"
] | 2,323
|
2015-01-15T19:45:10.000Z
|
2022-03-21T19:57:06.000Z
|
myuw/test/api/test_instructor_schedule.py
|
uw-it-aca/myuw
|
3fa1fabeb3c09d81a049f7c1a8c94092d612438a
|
[
"Apache-2.0"
] | 9
|
2015-01-15T19:29:26.000Z
|
2022-02-11T04:51:23.000Z
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import json
from django.test.utils import override_settings
from myuw.test.api import require_url, MyuwApiTest
from restclients_core.exceptions import DataFailureException
from myuw.views.api.instructor_schedule import InstScheCurQuar, InstSect
from myuw.dao.instructor_schedule import get_instructor_schedule_by_term
from myuw.dao.term import get_current_quarter
from myuw.test import get_request_with_user, get_request_with_date
def get_current_quarter_instructor_schedule(request):
schedule = get_instructor_schedule_by_term(
request, get_current_quarter(request))
return schedule
@require_url('myuw_instructor_current_schedule_api')
class TestInstructorCurrentSchedule(MyuwApiTest):
def test_bill_current_term(self):
now_request = get_request_with_user('bill')
schedule = get_current_quarter_instructor_schedule(now_request)
resp = InstScheCurQuar().get(now_request)
data = json.loads(resp.content)
self.assertTrue(data['grading_period_is_open'])
self.assertFalse(data['grading_period_is_past'])
self.assertEqual(len(data['sections']), 6)
section1 = data['sections'][0]
self.assertEqual(section1['lib_subj_guide'],
'http://guides.lib.uw.edu/research')
self.assertEqual(section1['curriculum_abbr'], "ESS")
self.assertEqual(section1['canvas_url'],
'https://canvas.uw.edu/courses/149651')
self.assertEqual(section1['limit_estimate_enrollment'], 15)
self.assertEqual(section1['final_exam']['latitude'], 47.656645546715)
self.assertEqual(
section1["email_list"]['section_list']['list_address'],
'ess102a_sp13')
section2 = data['sections'][1]
# Coda data
self.assertEqual(section2['failure_rate'], 0.01790613718411552)
section3 = data['sections'][2]
self.assertEqual(section3["color_id"], "2a")
self.assertFalse(section3["mini_card"])
self.assertFalse(section3.get("no_2nd_registration"))
self.assertFalse(section3.get("cc_display_dates"))
self.assertFalse(section3.get("early_fall_start"))
self.assertFalse(section3.get("has_early_fall_start"))
section6 = data['sections'][5]
self.assertTrue(section6['current'])
self.assertEqual(section6['canvas_url'],
'https://canvas.uw.edu/courses/149651')
self.assertEqual(len(section6['grade_submission_delegates']), 1)
self.assertEqual(
len(data['sections'][4]['grade_submission_delegates']), 1)
self.assertGreater(len(data['related_terms']), 3)
self.assertEqual(
section6["email_list"]['section_list']['list_address'],
'train101a_sp13')
self.assertGreater(len(data['related_terms']), 2)
self.assertEqual(data['related_terms'][
len(data['related_terms']) - 3]['quarter'], 'Spring')
self.assertEqual(data['related_terms'][5]['year'], 2013)
# Coda data
self.assertEqual(data['sections'][1]['failure_rate'],
0.01790613718411552)
@require_url('myuw_instructor_schedule_api',
kwargs={'year': 2013, 'quarter': 'summer'},
message="Specific term instructor URLs not configured")
class TestInstructorTermSchedule(MyuwApiTest):
def get_schedule(self, **kwargs):
return self.get_response_by_reverse(
'myuw_instructor_schedule_api',
kwargs=kwargs,)
def test_bill_future_term(self):
self.set_user('bill')
response = self.get_schedule(year=2013, quarter='summer')
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
section = data['sections'][0]
self.assertFalse('failure_rate' in section)
self.assertFalse('evaluation' in section)
def test_bill_past_term(self):
self.set_user('bill')
response = self.get_schedule(year=2013, quarter='winter')
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertFalse(data['sections'][0]['current'])
def test_remote_sections(self):
# MUWM-4728, MUWM-4989
request = get_request_with_user('billsea',
get_request_with_date("2020-10-01"))
schedule = get_current_quarter_instructor_schedule(request)
self.assertFalse(schedule.sections[0].is_remote)
self.assertFalse(schedule.sections[3].is_remote)
def test_having_secondary_sections_case(self):
now_request = get_request_with_user(
'billsea', get_request_with_date("2017-10-01"))
schedule = get_current_quarter_instructor_schedule(now_request)
resp = InstScheCurQuar().get(now_request)
self.assertEquals(resp.status_code, 200)
data = json.loads(resp.content)
self.assertFalse(data["future_term"])
self.assertEqual(len(data['sections']), 6)
primary_section = data['sections'][0]
self.assertEqual(primary_section["section_label"],
"2017_autumn_CSE_154_A")
self.assertEqual(primary_section["color_id"], 1)
self.assertEqual(primary_section["total_linked_secondaries"], 4)
final = primary_section['final_exam']
self.assertFalse(final["is_confirmed"])
self.assertEqual(final["building"], 'ARC')
self.assertEqual(final["room"], '147')
self.assertEqual(primary_section["current_enrollment"], 107)
self.assertEqual(primary_section["limit_estimated_enrollment"], 220)
secondary_section = data['sections'][1]
self.assertEqual(secondary_section["section_label"],
"2017_autumn_CSE_154_AA")
self.assertEqual(secondary_section["color_id"], "1a")
self.assertFalse(secondary_section["mini_card"])
self.assertEqual(secondary_section["primary_section_label"],
"2017_autumn_CSE_154_A")
final = secondary_section["final_exam"]
self.assertFalse(final["is_confirmed"])
self.assertEqual(final["building"], 'ARC')
self.assertEqual(final["room"], '147')
primary_section = data['sections'][5]
self.assertEqual(primary_section["section_label"],
"2017_autumn_EDC_I_552_A")
class TestInstructorSection(MyuwApiTest):
def test_bill_section(self):
now_request = get_request_with_user('bill')
section_id = '2013,spring,ESS,102/A'
resp = InstSect().get(now_request, section_id=section_id)
data = json.loads(resp.content)
self.assertEqual(len(data['sections']), 1)
self.assertEqual(
data['sections'][0]['limit_estimate_enrollment'], 15)
self.assertEqual(
data['sections'][0]['final_exam']['latitude'],
47.656645546715)
self.assertEqual(data['sections'][0]['canvas_url'],
'https://canvas.uw.edu/courses/149651')
self.assertEqual(
len(data['sections'][0]['grade_submission_delegates']), 1)
self.assertGreater(len(data['related_terms']), 3)
self.assertEqual(data['related_terms'][
len(data['related_terms']) - 3]['quarter'], 'Spring')
self.assertEqual(data['related_terms'][5]['year'], 2013)
def test_non_section(self):
now_request = get_request_with_user('bill')
section_id = '2013,spring,ESS,102/Z'
resp = InstSect().get(now_request, section_id=section_id)
self.assertEqual(resp.status_code, 404)
section_id = '2013,spring,ESS,102'
resp = InstSect().get(now_request, section_id=section_id)
self.assertEqual(resp.status_code, 400)
def test_bill100_section(self):
now_request = get_request_with_user('bill100')
section_id = '2013,spring,ESS,102/A'
resp = InstSect().get(now_request, section_id=section_id)
self.assertEqual(resp.status_code, 403)
def test_billpce_current_term(self):
now_request = get_request_with_user('bill')
schedule = get_current_quarter_instructor_schedule(now_request)
resp = InstScheCurQuar().get(now_request)
data = json.loads(resp.content)
self.assertEqual(len(data['sections']), 6)
section1 = data['sections'][0]
self.assertTrue('cc_display_dates' in section1)
self.assertFalse(section1['sln'] == 0)
self.assertEqual(section1['eos_cid'], None)
now_request = get_request_with_user('billpce')
schedule = get_current_quarter_instructor_schedule(now_request)
resp = InstScheCurQuar().get(now_request)
data = json.loads(resp.content)
self.assertEqual(len(data['sections']), 5)
self.assertTrue(data["has_eos_dates"])
section1 = data['sections'][0]
self.assertEqual(section1['section_label'], "2013_spring_AAES_150_A")
self.assertTrue(section1['cc_display_dates'])
self.assertTrue(section1['sln'] == 0)
self.assertEqual(section1['eos_cid'], 116872)
self.assertIsNotNone(section1['myuwclass_url'])
section2 = data['sections'][1]
self.assertTrue(section2['evaluation']["eval_not_exist"])
section3 = data['sections'][2]
self.assertEqual(section3["section_type"], 'CLS')
self.assertTrue(section3["has_eos_dates"])
self.assertFalse(section3["meetings"][0]["start_end_same"])
self.assertTrue(section3["meetings"][2]["start_end_same"])
self.assertEqual(section3['meetings'][0]['eos_start_date'],
'2013-04-03')
self.assertEqual(section3['meetings'][1]['eos_start_date'],
'2013-05-11')
self.assertEqual(section3['meetings'][2]['eos_start_date'],
'2013-05-29')
request = get_request_with_user('billpce',
get_request_with_date("2013-10-01"))
resp = InstScheCurQuar().get(request)
data = json.loads(resp.content)
self.assertEqual(len(data['sections']), 2)
self.assertEqual(data['sections'][0]['current_enrollment'], 1)
self.assertEqual(data['sections'][0]['enrollment_student_name'],
"Student1, Jake Average")
def test_non_instructor(self):
now_request = get_request_with_user('staff')
sche = get_current_quarter_instructor_schedule(now_request)
resp = InstScheCurQuar().get(now_request)
self.assertEquals(resp.status_code, 200)
data = json.loads(resp.content)
self.assertEqual(len(data['sections']), 0)
def test_billsea_section(self):
now_request = get_request_with_user('billsea')
section_id = '2017,autumn,EDC&I,552/A'
resp = InstSect().get(now_request, section_id=section_id)
data = json.loads(resp.content)
self.assertEqual(len(data['sections']), 1)
self.assertEqual(data['sections'][0]['section_label'],
'2017_autumn_EDC_I_552_A')
self.assertEqual(data['sections'][0]['curriculum_abbr'],
'EDC&I')
def test_billpce_joint_sections(self):
request = get_request_with_user(
'billpce', get_request_with_date("2018-02-01"))
resp = InstScheCurQuar().get(request)
data = json.loads(resp.content)
self.assertEqual(len(data['sections']), 2)
self.assertEqual(data['sections'][0]['current_enrollment'], 18)
self.assertEqual(data['sections'][1]['current_enrollment'], 3)
def test_remote_courese(self):
# MUWM-4728, MUWM-4989
request = get_request_with_user(
'billsea', get_request_with_date('2020-10-01'))
resp = InstScheCurQuar().get(request)
data = json.loads(resp.content)
self.assertEquals(len(data["sections"]), 5)
ee = data["sections"][0]
self.assertFalse(ee["is_remote"])
self.assertFalse(ee["final_exam"]["is_remote"])
self.assertFalse(ee["meetings"][0]["is_remote"])
| 42.821678
| 77
| 0.651343
|
5e6e3d43a4f8d16aae99fc996c8561a9435e282d
| 3,726
|
py
|
Python
|
tests/test_type_utils.py
|
benbariteau/environment_tools
|
b65ff4df5b61ab9ee6e2cb4d922fd2b99d17a9d0
|
[
"Apache-2.0"
] | 13
|
2016-03-29T22:00:32.000Z
|
2021-11-08T10:25:55.000Z
|
tests/test_type_utils.py
|
benbariteau/environment_tools
|
b65ff4df5b61ab9ee6e2cb4d922fd2b99d17a9d0
|
[
"Apache-2.0"
] | 8
|
2015-11-24T22:12:42.000Z
|
2021-05-25T00:06:55.000Z
|
tests/test_type_utils.py
|
benbariteau/environment_tools
|
b65ff4df5b61ab9ee6e2cb4d922fd2b99d17a9d0
|
[
"Apache-2.0"
] | 5
|
2016-10-24T03:40:37.000Z
|
2021-08-10T21:42:56.000Z
|
# -*- coding: utf-8 -*-
import mock
import six
from environment_tools.config import _convert_mapping_to_graph
import environment_tools.type_utils
from environment_tools.type_utils import available_location_types
from environment_tools.type_utils import convert_location_type
from environment_tools.type_utils import compare_types
from environment_tools.type_utils import get_current_location
from environment_tools.type_utils import location_graph
from pytest import yield_fixture
class TestTypeUtils:
@yield_fixture
def mock_data(self):
fake_data = {
'location_types.json': ['environment', 'region', 'az'],
'location_mapping.json': {
'prod_environment': {
'usnorth1-prod_region': {
'usnorth1aprod_az': {},
'usnorth1bprod_az': {},
},
'usnorth2-prod_region': {
'usnorth2aprod_az': {},
'usnorth2bprod_az': {},
'usnorth2cprod_az': {},
},
},
'dev_environment': {
'usnorth1-dev_region': {
'usnorth1adev_az': {},
'usnorth1bdev_az': {},
},
},
},
}
with mock.patch('environment_tools.type_utils._read_data_json',
side_effect=fake_data.get) as mock_fake_data:
empty_graph = environment_tools.type_utils.GraphCache(None, None)
environment_tools.type_utils._location_graph_cache = empty_graph
yield mock_fake_data
environment_tools.type_utils._location_graph_cache = empty_graph
def test_location_graph_cache(self, mock_data):
mock_convert = mock.Mock(spec=_convert_mapping_to_graph)
mock_convert.return_value = 'fake_graph'
with mock.patch(
'environment_tools.type_utils._convert_mapping_to_graph',
mock_convert):
for i in range(5):
assert location_graph() == 'fake_graph'
assert mock_convert.call_count == 1
assert location_graph(use_cache=False) == 'fake_graph'
assert mock_convert.call_count == 2
def test_available_location_types(self, mock_data):
location_types = available_location_types()
assert isinstance(location_types, list)
assert len(location_types) > 0
def test_compare_types(self, mock_data):
assert compare_types('environment', 'az') < 0
assert compare_types('az', 'az') == 0
assert compare_types('az', 'region') > 0
def test_down_convert(self, mock_data):
down_convert = convert_location_type('prod', 'environment', 'az')
assert isinstance(down_convert, list)
assert len(down_convert) > 1
for result in down_convert:
assert isinstance(result, str)
def test_up_convert(self, mock_data):
up = convert_location_type('usnorth1bprod', 'az', 'environment')
assert up == ['prod']
up = convert_location_type('usnorth1aprod', 'az', 'region')
assert up == ['usnorth1-prod']
def test_same_convert(self, mock_data):
same = convert_location_type('usnorth2cprod', 'az', 'az')
assert same == ['usnorth2cprod']
def test_get_current_location(self, mock_data):
mock_open = mock.mock_open(read_data='test ')
if six.PY2:
open_module = '__builtin__.open'
else:
open_module = 'builtins.open'
with mock.patch(open_module, mock_open):
assert get_current_location('az') == 'test'
| 38.020408
| 77
| 0.608964
|
4752ea445c74d7d0ab778915db2c14bd34f2fc46
| 8,730
|
py
|
Python
|
Stress/SIGI_Fase_1_HSM/MpiConclusiones.py
|
VictorAdad/sigi-api-adad
|
412511b04b420a82cf6ae5338e401b181faae022
|
[
"CC0-1.0"
] | null | null | null |
Stress/SIGI_Fase_1_HSM/MpiConclusiones.py
|
VictorAdad/sigi-api-adad
|
412511b04b420a82cf6ae5338e401b181faae022
|
[
"CC0-1.0"
] | null | null | null |
Stress/SIGI_Fase_1_HSM/MpiConclusiones.py
|
VictorAdad/sigi-api-adad
|
412511b04b420a82cf6ae5338e401b181faae022
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf8 -*-
import sys
from locust import HttpLocust, TaskSet, task
from requests_toolbelt import MultipartEncoder
from random import randrange
import json
import requests
import variables
import time
import datetime
import evotilities
# Issues the requests for the case's conclusions ("conclusiones") stage
def conclusiones_mpi(Mpi, response_idCaso):
    ###########################################################
    ###         Archivo temporal (temporary archive)        ###
    ###########################################################
    # GET request to the "Archivo temporal" paginator
print("Petición a caso " + str(response_idCaso) + " para Archivo temporal ")
response = Mpi.client.get("/v1/base/archivos-temporales/casos/"+str(response_idCaso)+"/page?f=&p=0&tr=10", name="archivos-temporales get")
print("Response status code", response.status_code)
print("Response content", response.content)
    # Build the JSON payload for the "Archivo temporal" POST
json_archivoTemporal = {
"caso": {
"id": response_idCaso
},
"personas":[],
"observaciones": "Observaciones para el archivo temporal"
}
json_again = json.dumps(json_archivoTemporal)
print(json_again)
    # POST request to save the "Archivo temporal" record in the DB
print("Insertando Registro en la BD (Solicitud para archivo temporal)")
Mpi.client.headers['Content-Type'] = "application/json"
response = Mpi.client.post("/v1/base/archivos-temporales", data=json_again, name="archivos-temporales post")
print("Response status_code", response.status_code)
print("Response content", response.content)
formatoArchivoTemporalId = str(json.loads(response.text)['id'])
    # GET request to the "Archivo temporal" paginator, now with data
print("Petición a caso " + str(response_idCaso) + " para Archivo temporal ")
response = Mpi.client.get("/v1/base/archivos-temporales/casos/"+str(response_idCaso)+"/page?f=&p=0&tr=10", name="archivos-temporales get")
print("Response status code", response.status_code)
print("Response content", response.content)
    # GET request to generate the "Archivo temporal" document (formato)
time.sleep(10)
print("Petición a caso " + str(response_idCaso) + " para generar formato Archivo temporal ")
response = Mpi.client.get("/v1/documentos/formatos/save/"+formatoArchivoTemporalId+"/F1_012", name="ERROR DOC - archivos-temporales generar formato")
print("Response status code", response.status_code)
print("Response content", response.content)
if response.status_code == 500 :
f = open('error.txt', 'a')
f.write('\n' + 'Oficio de archivo temporal: \n' + 'Response status code: \n' + str(response.status_code) +
'\n' + 'Response content: \n' + str(response.content))
f.close()
    ###########################################################
    ###   No ejercicio de la acción penal (non-prosecution) ###
    ###########################################################
    # GET request to the "no ejercicio de la acción penal" paginator
print("Petición a caso " + str(response_idCaso) + " para no ejercicio a la accion penal ")
response = Mpi.client.get("/v1/base/no-ejercicio-accion/casos/"+str(response_idCaso)+"/page?f=&p=0&tr=10", name="no ejercicio a la accion penal get")
print("Response status code", response.status_code)
print("Response content", response.content)
    # Build the JSON payload for the "no ejercicio de la acción penal" POST
json_noEjericicioAccionPenal = {
"caso": {
"id": response_idCaso
},
"personas":[],
"hipotesisSobreseimiento": "IV",
"ambitoHechos": "competencia",
"narracionHechos": "Estas son las narraciones de los hechos",
"datosPrueba": "Aquí van los datos de prueba ",
"fechaHechoDelictivo": "2018-01-08T16:50:09.264Z",
"articuloCpem": "Articulo x",
"hipotesisCnpp": "Actualización del articulo 327",
"fraccionArticulo": "fracción segunda",
"nombreProcurador": "Pedro Fiscal",
"autoridadCompetente": "Antonio Tablada",
"causaIncompetencia": "causa irreconocible",
"cargoAutoridadCompetente": "Cargo noveno",
"observaciones": "mis observaciones "
}
json_again = json.dumps(json_noEjericicioAccionPenal)
print(json_again)
    # POST request to save the "no ejercicio de la acción penal" record in the DB
print("Insertando Registro en la BD (Solicitud para no ejercicio a la accion penal)")
Mpi.client.headers['Content-Type'] = "application/json"
response = Mpi.client.post("/v1/base/no-ejercicio-accion", data=json_again, name="no ejercicio a la acción penal post")
print("Response status_code", response.status_code)
print("Response content", response.content)
formatoNoEjercicioPenalId = str(json.loads(response.text)['id'])
    # GET request to the "no ejercicio de la acción penal" paginator, now with data
print("Petición a caso " + str(response_idCaso) + " para no ejercicio a la accion penal ")
response = Mpi.client.get("/v1/base/no-ejercicio-accion/casos/"+str(response_idCaso)+"/page?f=&p=0&tr=10", name="no ejercicio a la acción penal get")
print("Response status code", response.status_code)
print("Response content", response.content)
    # GET request to generate the "no ejercicio de la acción penal" document (formato)
time.sleep(10)
print("Petición a caso " + str(response_idCaso) + " para generar no ejercicio a la accion penal")
response = Mpi.client.get("/v1/documentos/formatos/save/"+formatoNoEjercicioPenalId+"/F1_014", name="ERROR DOC - no ejercicio a la accion penal generar formato")
print("Response status code", response.status_code)
print("Response content", response.content)
if response.status_code == 500 :
f = open('error.txt', 'a')
f.write('\n' + 'Oficio no ejercicio a la accion penal: \n' + 'Response status code: \n' + str(response.status_code) +
'\n' + 'Response content: \n' + str(response.content))
f.close()
    ###########################################################
    ### Facultad de no investigar (power not to investigate)###
    ###########################################################
    # GET request to the "facultad de no investigar" paginator
print("Petición a caso " + str(response_idCaso) + " para facultades-no-investigar ")
response = Mpi.client.get("/v1/base/facultades-no-investigar/casos/"+str(response_idCaso)+"/page?f=&p=0&tr=10", name="facultades-no-investigar get")
print("Response status code", response.status_code)
print("Response content", response.content)
    # Build the JSON payload for the "facultad de no investigar" POST
json_facultadNoInvestigar = {
"caso": {
"id": response_idCaso
},
"personas":[],
"observaciones": "Mis observaciones",
"sintesisHechos": "Mi síntesis de los hechos ",
"datosPrueba": "Aquí están mis datos de prueba ",
"motivosAbstuvoInvestigar": "Motivo para investigar",
"medioAlternativoSolucion": "Medios alternativos",
"destinatarioDeterminacion": "A Juan Fernandez",
"superiorJerarquico": "Hoffman Hernan",
"nombreDenunciante": "Juan alvarez",
"originarioDenunciante": "Puebla",
"edadDenunciante": "50",
"domicilioDenunciante": "La otra cuadra",
"fraccion": "5"
}
json_again = json.dumps(json_facultadNoInvestigar)
print(json_again)
    # POST request to save the "facultad de no investigar" record in the DB
print("Insertando Registro en la BD (Solicitud para facultades-no-investigar)")
Mpi.client.headers['Content-Type'] = "application/json"
response = Mpi.client.post("/v1/base/facultades-no-investigar", data=json_again, name="facultades-no-investigar post")
print("Response status_code", response.status_code)
print("Response content", response.content)
formatoFacultadId = str(json.loads(response.text)['id'])
    # GET request to the "facultad de no investigar" paginator, now with data
print("Petición a caso " + str(response_idCaso) + " para facultades-no-investigar ")
response = Mpi.client.get("/v1/base/facultades-no-investigar/casos/"+str(response_idCaso)+"/page?f=&p=0&tr=10", name="facultades-no-investigar penal get")
print("Response status code", response.status_code)
print("Response content", response.content)
    # GET request to generate the "facultad de no investigar" document (formato)
time.sleep(10)
print("Petición a caso " + str(response_idCaso) + " para generar facultades-no-investigar")
response = Mpi.client.get("/v1/documentos/formatos/save/"+formatoFacultadId+"/F1_013", name="ERROR DOC - facultades-no-investigar penal generar formato")
print("Response status code", response.status_code)
print("Response content", response.content)
if response.status_code == 500 :
f = open('error.txt', 'a')
f.write('\n' + 'Oficio de facultad de no inverstigar: \n' + 'Response status code: \n' + str(response.status_code) +
'\n' + 'Response content: \n' + str(response.content))
f.close()
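# The three HTTP-500 logging blocks above share the same pattern; a helper like the
# following could replace them (hypothetical refactor, not part of the original script):
def _log_500(tag, response):
    """Append the status code and body of a failed (HTTP 500) response to error.txt."""
    if response.status_code == 500:
        with open('error.txt', 'a') as f:
            f.write('\n' + tag + ': \n' + 'Response status code: \n' + str(response.status_code) +
                    '\n' + 'Response content: \n' + str(response.content))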
| 47.189189
| 163
| 0.690607
|
5855181aa02077e87530ec5d154c8f5efdb1a0f7
| 2,416
|
py
|
Python
|
get_artists.py
|
zyingzhou/hello-world
|
f328a292cf99f4b4d8b74125e21c43d14dd435e9
|
[
"MIT"
] | 261
|
2018-10-10T11:43:50.000Z
|
2022-03-19T03:29:51.000Z
|
get_artists.py
|
zyingzhou/wangyiyun_music
|
f328a292cf99f4b4d8b74125e21c43d14dd435e9
|
[
"MIT"
] | 5
|
2018-11-11T06:05:39.000Z
|
2021-07-18T08:55:44.000Z
|
get_artists.py
|
zyingzhou/wangyiyun_music
|
f328a292cf99f4b4d8b74125e21c43d14dd435e9
|
[
"MIT"
] | 143
|
2018-10-10T05:27:47.000Z
|
2021-12-21T10:22:19.000Z
|
# May 26 2018 Author: Zhiying Zhou
import requests
from bs4 import BeautifulSoup
import csv
# Function to fetch artist information from a category page and write it to the CSV
def get_artists(url):
headers={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Cookie': '_iuqxldmzr_=32; _ntes_nnid=0e6e1606eb78758c48c3fc823c6c57dd,1527314455632; '
'_ntes_nuid=0e6e1606eb78758c48c3fc823c6c57dd; __utmc=94650624; __utmz=94650624.1527314456.1.1.'
'utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); WM_TID=blBrSVohtue8%2B6VgDkxOkJ2G0VyAgyOY;'
' JSESSIONID-WYYY=Du06y%5Csx0ddxxx8n6G6Dwk97Dhy2vuMzYDhQY8D%2BmW3vlbshKsMRxS%2BJYEnvCCh%5CKY'
'x2hJ5xhmAy8W%5CT%2BKqwjWnTDaOzhlQj19AuJwMttOIh5T%5C05uByqO%2FWM%2F1ZS9sqjslE2AC8YD7h7Tt0Shufi'
'2d077U9tlBepCx048eEImRkXDkr%3A1527321477141; __utma=94650624.1687343966.1527314456.1527314456'
'.1527319890.2; __utmb=94650624.3.10.1527319890',
'Host': 'music.163.com',
'Referer': 'http://music.163.com/',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/66.0.3359.181 Safari/537.36'}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, 'html5lib')
for artist in soup.find_all('a', attrs={'class': 'nm nm-icn f-thide s-fc0'}):
artist_name = artist.string
artist_id = artist['href'].replace('/artist?id=', '').strip()
try:
writer.writerow((artist_id, artist_name))
except Exception as msg:
print(msg)
ls1 = [1001, 1002, 1003, 2001, 2002, 2003, 6001, 6002, 6003, 7001, 7002, 7003, 4001, 4002, 4003] # values of the id parameter
ls2 = [-1, 0, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90] # values of the initial parameter
csvfile = open('/home/zhiying/文档/music_163_artists(1).csv', 'a', encoding='utf-8') # where the output file is stored
writer = csv.writer(csvfile)
writer.writerow(('artist_id', 'artist_name'))
for i in ls1:
for j in ls2:
url = 'http://music.163.com/discover/artist/cat?id=' + str(i) + '&initial=' + str(j)
get_artists(url)
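# Note: the CSV handle opened above is never closed, so buffered rows can be lost when
# the interpreter exits; closing it explicitly (or using `with open(...)`) is safer.
csvfile.close()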
| 53.688889
| 132
| 0.622103
|
20cab473124ea2d2aa6208a7db18c441fdc50f7a
| 316
|
py
|
Python
|
app/models/__init__.py
|
Detry322/map-creator
|
e0bf44ba0a99e03b30bb911735f85614d7d9db66
|
[
"MIT"
] | 1
|
2022-02-18T08:19:28.000Z
|
2022-02-18T08:19:28.000Z
|
app/models/__init__.py
|
Detry322/map-creator
|
e0bf44ba0a99e03b30bb911735f85614d7d9db66
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
Detry322/map-creator
|
e0bf44ba0a99e03b30bb911735f85614d7d9db66
|
[
"MIT"
] | null | null | null |
from app.models.basic_dcgan import BasicDCGAN
from app.models.better_dcgan import BetterDCGAN
from app.models.best_dcgan import BestDCGAN
from app.models.autoencoder import Autoencoder
all_models = {
'BasicDCGAN': BasicDCGAN,
'Autoencoder': Autoencoder,
'BetterDCGAN': BetterDCGAN,
'BestDCGAN': BestDCGAN
}
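# A minimal sketch of how this registry is typically consumed (the helper below is
# hypothetical and assumes each model class accepts the keyword arguments it is given):
def build_model(name, **kwargs):
    """Instantiate a registered model by name, e.g. build_model('BasicDCGAN')."""
    try:
        return all_models[name](**kwargs)
    except KeyError:
        raise ValueError('Unknown model {!r}; expected one of {}'.format(name, sorted(all_models)))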
| 26.333333
| 47
| 0.800633
|
63fd40a0915f166543431e61aeedb70bb4672fd6
| 4,495
|
py
|
Python
|
Python/kraken/ui/DataTypeWidgets/Image2DWidgetImpl.py
|
FabricExile/Kraken
|
d8c1f5189cb191945e2c18a1369c458d05305afc
|
[
"BSD-3-Clause"
] | 7
|
2017-12-04T16:57:42.000Z
|
2021-09-07T07:02:38.000Z
|
Python/kraken/ui/DataTypeWidgets/Image2DWidgetImpl.py
|
xtvjxk123456/Kraken
|
d8c1f5189cb191945e2c18a1369c458d05305afc
|
[
"BSD-3-Clause"
] | null | null | null |
Python/kraken/ui/DataTypeWidgets/Image2DWidgetImpl.py
|
xtvjxk123456/Kraken
|
d8c1f5189cb191945e2c18a1369c458d05305afc
|
[
"BSD-3-Clause"
] | 6
|
2017-11-14T06:50:48.000Z
|
2021-08-21T22:47:29.000Z
|
import json
from PySide import QtCore, QtGui
from parameter import Parameter
from AttributeWidgetImpl import AttributeWidget
class Image2DWidget(AttributeWidget):

    def __init__(self, attribute, parentWidget=None, addNotificationListener=True):
        super(Image2DWidget, self).__init__(attribute, parentWidget=parentWidget, addNotificationListener=addNotificationListener)

        self._grid = QtGui.QGridLayout()
        self._grid.setContentsMargins(0, 0, 0, 0)
        self.__value = self._invokeGetter()

        # format
        formatLabelWidget = QtGui.QLabel("format", self)
        formatLabelWidget.setMinimumWidth(20)
        self._formatWidget = QtGui.QLineEdit(self)
        self._formatWidget.setText(self.__value.pixelFormat)
        self._formatWidget.setReadOnly(True)
        self._grid.addWidget(formatLabelWidget, 0, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
        self._grid.addWidget(self._formatWidget, 0, 1)

        # width
        widthLabelWidget = QtGui.QLabel("width", self)
        widthLabelWidget.setMinimumWidth(20)
        self._widthWidget = QtGui.QSpinBox(self)
        self._widthWidget.setMinimum(0)
        self._widthWidget.setMaximum(9999999)
        self._widthWidget.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
        self._widthWidget.setValue(self.__value.width)
        self._widthWidget.setReadOnly(True)
        self._grid.addWidget(widthLabelWidget, 1, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
        self._grid.addWidget(self._widthWidget, 1, 1)

        # height
        heightLabelWidget = QtGui.QLabel("height", self)
        heightLabelWidget.setMinimumWidth(20)
        self._heightWidget = QtGui.QSpinBox(self)
        self._heightWidget.setMinimum(0)
        self._heightWidget.setMaximum(9999999)
        self._heightWidget.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
        self._heightWidget.setValue(self.__value.height)
        self._heightWidget.setReadOnly(True)
        self._grid.addWidget(heightLabelWidget, 2, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
        self._grid.addWidget(self._heightWidget, 2, 1)

        self._thumbnailSize = 40
        self.tumbnailWidget = QtGui.QLabel()
        self.tumbnailWidget.setBackgroundRole(QtGui.QPalette.Base)
        self.tumbnailWidget.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        self.tumbnailWidget.setScaledContents(True)

        self._updateThumbnail()

        self.setLayout(self._grid)
        self.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Preferred)

        # self.updateWidgetValue()

    def _updateThumbnail(self):
        if self.__value.width > 0 and self.__value.height > 0:
            self._qimage = QtGui.QImage(self._thumbnailSize, self._thumbnailSize, QtGui.QImage.Format_RGB32)
            for i in range(self._thumbnailSize):
                for j in range(self._thumbnailSize):
                    if self.__value.pixelFormat == "RGB":
                        pixelColor = self.__value.sampleRGB("""RGB""", float(i)/(self._thumbnailSize - 1.0), float(j)/(self._thumbnailSize - 1.0))
                    elif self.__value.pixelFormat == "RGBA":
                        pixelColor = self.__value.sampleRGBA("""RGBA""", float(i)/(self._thumbnailSize - 1.0), float(j)/(self._thumbnailSize - 1.0))
                    pixelValue = QtGui.qRgb(pixelColor.r, pixelColor.g, pixelColor.b)
                    self._qimage.setPixel(i, j, pixelValue)
            self.tumbnailWidget.setPixmap(QtGui.QPixmap.fromImage(self._qimage))
            self._grid.addWidget(self.tumbnailWidget, 3, 0, 2, 2)
            self._grid.setRowStretch(4, 2)

    def getWidgetValue(self):
        return self.__value

    def setWidgetValue(self, value):
        self.__value = value
        self._formatWidget.setText(self.__value.pixelFormat)
        self._widthWidget.setValue(self.__value.width)
        self._heightWidget.setValue(self.__value.height)
        self._updateThumbnail()

    def unregisterNotificationListener(self):
        """
        When the widget is being removed from the inspector,
        this method must be called to unregister the event handlers
        """
        super(Image2DWidget, self).unregisterNotificationListener()

    @classmethod
    def canDisplay(cls, attribute):
        return attribute.getDataType() == 'Image2D'


Image2DWidget.registerPortWidget()
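# Hedged usage sketch (editor's note, not part of the original file): the widget is resolved
# through AttributeWidget's registry, so a hypothetical inspector would roughly do:
#
#   if Image2DWidget.canDisplay(attribute):              # attribute.getDataType() == 'Image2D'
#       widget = Image2DWidget(attribute, parentWidget=inspector)
#
# The exact registry lookup lives in AttributeWidgetImpl and is assumed here.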
| 42.40566
| 156
| 0.680089
|
e21bd991ba97cf662693c939ed307aa6166efee6
| 403
|
py
|
Python
|
30-Days-of-Code/30-dictionaries-and-maps.py
|
lakshika1064/Hackerrank_Solutions-Python
|
50ca205c5a3a9a4f294dcda077c390209eb57ecc
|
[
"MIT"
] | 1
|
2020-08-18T08:14:41.000Z
|
2020-08-18T08:14:41.000Z
|
30-Days-of-Code/30-dictionaries-and-maps.py
|
lakshika1064/Hackerrank_Solutions-Python
|
50ca205c5a3a9a4f294dcda077c390209eb57ecc
|
[
"MIT"
] | null | null | null |
30-Days-of-Code/30-dictionaries-and-maps.py
|
lakshika1064/Hackerrank_Solutions-Python
|
50ca205c5a3a9a4f294dcda077c390209eb57ecc
|
[
"MIT"
] | null | null | null |
# Enter your code here. Read input from STDIN. Print output to STDOUT
n=int(input())
dic={}
for i in range(0, n):
    name, phone = list(map(str, input().split()))
    dic[name] = phone
while(True):
    try:
        a = input()
        if a in dic:
            print(a + "=" + dic[a])
        else:
            print("Not found")
    except Exception as e:
        break
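# Worked example (editor's illustration): with STDIN
#   2
#   sam 99912222
#   harry 12299933
#   sam
#   edward
# the loop prints "sam=99912222", then "Not found", and exits on end-of-input via the except branch.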
| 23.705882
| 69
| 0.486352
|
5e422df2ab5ec345690974255d6c34b95cc8eefe
| 2,535
|
py
|
Python
|
stockhelper/getData.py
|
morning-dews/FDCA
|
1459f5c5d23d07c66b70bdd598944f5415e0f3ae
|
[
"MIT"
] | null | null | null |
stockhelper/getData.py
|
morning-dews/FDCA
|
1459f5c5d23d07c66b70bdd598944f5415e0f3ae
|
[
"MIT"
] | null | null | null |
stockhelper/getData.py
|
morning-dews/FDCA
|
1459f5c5d23d07c66b70bdd598944f5415e0f3ae
|
[
"MIT"
] | null | null | null |
import urllib.request as urllib
import json
import time
import chardet
class getStockValue():
    def __init__(self) -> None:
        self.url = 'http://money.finance.sina.com.cn/quotes' + \
            '_service/api/json_v2.php/CN_MarketData.getKLineData?symbol='
        self.url_realtime = 'http://hq.sinajs.cn/list='

    def formUrl(self, symbol: str, scale: int = 5, datalen: int = 2048) -> str:
        # The maximum returned data length is fixed, which means that
        # shrinking the scale lets us reach earlier data.
        return self.url + symbol + '&scale=' + str(scale) \
            + '&ma=1&datalen=' + str(datalen)

    def getHistoryData(self, Companycode: str, scale: int = 5,
                       datalen: int = 2048) -> json:
        urlt = self.formUrl(Companycode, scale=scale, datalen=datalen)
        flag = True
        while flag:
            try:
                flag = False
                response = urllib.urlopen(urlt)
                raw_html = response.read()
            except IOError:
                flag = True
                print('API is blocked by Sina, \
                      thus the system is sleeping.... Please wait!')
                time.sleep(60 * 30)  # sleep 30 minutes
        raw_html = str(raw_html, "utf-8")
        data = json.loads(raw_html, object_hook=self.as_float)
        return data

    def getRealTimeData(self, Companycode: str) -> list:
        url_r = self.url_realtime + Companycode
        flag = True
        while flag:
            try:
                flag = False
                response = urllib.urlopen(url_r)
                raw_html = response.read()
            except IOError:
                flag = True
                print('API is blocked by Sina, \
                      thus the system is sleeping.... Please wait!')
                time.sleep(60 * 30)  # sleep 30 minutes
        encoding = chardet.detect(raw_html)["encoding"]
        raw_html = str(raw_html, encoding)[21:-3]
        data = raw_html.split(',')
        return data

    def as_float(self, obj: dict) -> dict:
        if "open" in obj:
            obj["open"] = float(obj["open"])
        if "high" in obj:
            obj["high"] = float(obj["high"])
        if "low" in obj:
            obj["low"] = float(obj["low"])
        if "close" in obj:
            obj["close"] = float(obj["close"])
        if "volume" in obj:
            obj["volume"] = float(obj["volume"])
        return obj


# read_stock = getStockValue()
# data = read_stock.getHistoryData('sz000001')
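# Hedged sketch (editor's addition): the real-time endpoint returns one comma-separated quote line,
# so a caller might do something like the following (the Sina field layout is assumed, not verified):
#
#   read_stock = getStockValue()
#   fields = read_stock.getRealTimeData('sz000001')
#   print(fields[0], fields[1:4])   # name followed by open / previous close / current price (assumed)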
| 32.088608
| 79
| 0.53925
|
ac214c784ffaa406a1df90be11be64d85b49bb9a
| 68,237
|
py
|
Python
|
clients/python/girder_client/__init__.py
|
fepegar/girder
|
32cc38ac0aefe22b2b54a3c6f94d876b99bf0546
|
[
"Apache-2.0"
] | 395
|
2015-01-12T19:20:13.000Z
|
2022-03-30T05:40:40.000Z
|
clients/python/girder_client/__init__.py
|
fepegar/girder
|
32cc38ac0aefe22b2b54a3c6f94d876b99bf0546
|
[
"Apache-2.0"
] | 2,388
|
2015-01-01T20:09:19.000Z
|
2022-03-29T16:49:14.000Z
|
clients/python/girder_client/__init__.py
|
fepegar/girder
|
32cc38ac0aefe22b2b54a3c6f94d876b99bf0546
|
[
"Apache-2.0"
] | 177
|
2015-01-04T14:47:00.000Z
|
2022-03-25T09:01:51.000Z
|
# -*- coding: utf-8 -*-
from pkg_resources import DistributionNotFound, get_distribution
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
__version__ = None
__license__ = 'Apache 2.0'
import diskcache
import getpass
import glob
import io
import json
import logging
import mimetypes
import os
import re
import requests
import shutil
import tempfile
from contextlib import contextmanager
DEFAULT_PAGE_LIMIT = 50 # Number of results to fetch per request
REQ_BUFFER_SIZE = 65536 # Chunk size when iterating a download body
_safeNameRegex = re.compile(r'^[/\\]+')
_logger = logging.getLogger('girder_client.lib')
class AuthenticationError(RuntimeError):
pass
class IncorrectUploadLengthError(RuntimeError):
def __init__(self, message, upload=None):
super().__init__(message)
self.upload = upload
class HttpError(requests.HTTPError):
"""
Raised if the server returns an error status code from a request.
@deprecated This will be removed in a future release of Girder. Raisers of this
exception should instead raise requests.HTTPError manually or through another mechanism
such as requests.Response.raise_for_status.
"""
def __init__(self, status, text, url, method, response=None):
super().__init__('HTTP error %s: %s %s' % (status, method, url), response=response)
self.status = status
self.responseText = text
self.url = url
self.method = method
def __str__(self):
return super().__str__() + '\nResponse text: ' + self.responseText
class IncompleteResponseError(requests.RequestException):
def __init__(self, message, expected, received, response=None):
super().__init__('%s (%d of %d bytes received)' % (
message, received, expected
), response=response)
class _NoopProgressReporter:
reportProgress = False
def __init__(self, label='', length=0):
self.label = label
self.length = length
def update(self, chunkSize):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
pass
class _ProgressBytesIO(io.BytesIO):
def __init__(self, *args, **kwargs):
self.reporter = kwargs.pop('reporter')
super().__init__(*args, **kwargs)
def read(self, _size=-1):
_chunk = super().read(_size)
self.reporter.update(len(_chunk))
return _chunk
class GirderClient:
"""
A class for interacting with the Girder RESTful API.
Some simple examples of how to use this class follow:
.. code-block:: python
client = GirderClient(apiUrl='http://myhost:8080')
client.authenticate('myname', 'mypass')
folder_id = '53b714308926486402ac5aba'
item = client.createItem(folder_id, 'an item name', 'a description')
client.addMetadataToItem(item['_id'], {'metadatakey': 'metadatavalue'})
client.uploadFileToItem(item['_id'], 'path/to/your/file.txt')
r1 = client.getItem(item['_id'])
r2 = client.sendRestRequest('GET', 'item',
{'folderId': folder_id, 'sortdir': '-1' })
r3 = client.sendRestRequest('GET', 'resource/search',
{'q': 'aggregated','types': '["folder", "item"]'})
"""
# The current maximum chunk size for uploading file chunks
MAX_CHUNK_SIZE = 1024 * 1024 * 64
DEFAULT_API_ROOT = 'api/v1'
DEFAULT_HOST = 'localhost'
DEFAULT_LOCALHOST_PORT = 8080
DEFAULT_HTTP_PORT = 80
DEFAULT_HTTPS_PORT = 443
@staticmethod
def getDefaultPort(hostname, scheme):
"""Get default port based on the hostname.
Returns `GirderClient.DEFAULT_HTTPS_PORT` if scheme is `https`, otherwise
returns `GirderClient.DEFAULT_LOCALHOST_PORT` if `hostname` is `localhost`,
and finally returns `GirderClient.DEFAULT_HTTP_PORT`.
"""
if scheme == 'https':
return GirderClient.DEFAULT_HTTPS_PORT
if hostname == 'localhost':
return GirderClient.DEFAULT_LOCALHOST_PORT
return GirderClient.DEFAULT_HTTP_PORT
@staticmethod
def getDefaultScheme(hostname):
"""Get default scheme based on the hostname.
Returns `http` if `hostname` is `localhost` otherwise returns `https`.
"""
if hostname == 'localhost':
return 'http'
else:
return 'https'
def __init__(self, host=None, port=None, apiRoot=None, scheme=None, apiUrl=None,
cacheSettings=None, progressReporterCls=None):
"""
Construct a new GirderClient object, given a host name and port number,
as well as a username and password which will be used in all requests
(HTTP Basic Auth). You can pass the URL in parts with the `host`,
`port`, `scheme`, and `apiRoot` kwargs, or simply pass it in all as
one URL with the `apiUrl` kwarg instead. If you pass `apiUrl`, the
individual part kwargs will be ignored.
:param apiUrl: The full path to the REST API of a Girder instance, e.g.
`http://my.girder.com/api/v1`.
:param host: A string containing the host name where Girder is running,
the default value is 'localhost'
:param port: The port number on which to connect to Girder,
the default value is 80 for http: and 443 for https:
:param apiRoot: The path on the server corresponding to the root of the
Girder REST API. If None is passed, assumes '/api/v1'.
:param scheme: A string containing the scheme for the Girder host,
the default value is 'http'; if you pass 'https' you likely want
to pass 443 for the port
:param cacheSettings: Settings to use with the diskcache library, or
None to disable caching.
:param progressReporterCls: the progress reporter class to instantiate. This class
is expected to be a context manager with a constructor accepting `label` and
`length` keyword arguments, an `update` method accepting a `chunkSize` argument and
a class attribute `reportProgress` set to True (It can conveniently be
initialized using `sys.stdout.isatty()`).
This defaults to :class:`_NoopProgressReporter`.
"""
self.host = None
self.scheme = None
self.port = None
if apiUrl is None:
if not apiRoot:
apiRoot = self.DEFAULT_API_ROOT
# If needed, prepend '/'
if not apiRoot.startswith('/'):
apiRoot = '/' + apiRoot
self.host = host or self.DEFAULT_HOST
self.scheme = scheme or GirderClient.getDefaultScheme(self.host)
self.port = port or GirderClient.getDefaultPort(self.host, self.scheme)
self.urlBase = '%s://%s:%s%s' % (
self.scheme, self.host, str(self.port), apiRoot)
else:
self.urlBase = apiUrl
if self.urlBase[-1] != '/':
self.urlBase += '/'
self.token = ''
self._folderUploadCallbacks = []
self._itemUploadCallbacks = []
self._serverVersion = []
self._serverApiDescription = {}
self.incomingMetadata = {}
self.localMetadata = {}
if cacheSettings is None:
self.cache = None
else:
self.cache = diskcache.Cache(**cacheSettings)
if progressReporterCls is None:
progressReporterCls = _NoopProgressReporter
self.progressReporterCls = progressReporterCls
self._session = None
@contextmanager
def session(self, session=None):
"""
Use a :class:`requests.Session` object for all outgoing requests from
:class:`GirderClient`. If `session` isn't passed into the context manager
then one will be created and yielded. Session objects are useful for enabling
persistent HTTP connections as well as partially applying arguments to many
requests, such as headers.
Note: `session` is closed when the context manager exits, regardless of who
created it.
.. code-block:: python
with gc.session() as session:
session.headers.update({'User-Agent': 'myapp 1.0'})
for itemId in itemIds:
gc.downloadItem(itemId, fh)
In the above example, each request will be executed with the User-Agent header
while reusing the same TCP connection.
:param session: An existing :class:`requests.Session` object, or None.
"""
self._session = session if session else requests.Session()
yield self._session
self._session.close()
self._session = None
def authenticate(self, username=None, password=None, interactive=False, apiKey=None):
"""
Authenticate to Girder, storing the token that comes back to be used in
future requests. This method can be used in two modes, either username
and password authentication, or using an API key. Username example:
.. code-block:: python
gc.authenticate(username='myname', password='mypass')
Note that you may also pass ``interactive=True`` and omit either the
username or password argument to be prompted for them in the shell. The
second mode is using an API key:
.. code-block:: python
gc.authenticate(apiKey='J77R3rsLYYqFXXwQ4YquQtek1N26VEJ7IAVz9IpU')
API keys can be created and managed on your user account page in the
Girder web client, and can be used to provide limited access to the Girder web API.
:param username: A string containing the username to use in basic authentication.
:param password: A string containing the password to use in basic authentication.
:param interactive: If you want the user to type their username or
password in the shell rather than passing it in as an argument,
set this to True. If you pass a username in interactive mode, the
user will only be prompted for a password. This option only works
in username/password mode, not API key mode.
:param apiKey: Pass this to use an API key instead of username/password authentication.
:type apiKey: str
"""
if apiKey:
resp = self.post('api_key/token', parameters={
'key': apiKey
})
self.setToken(resp['authToken']['token'])
else:
if interactive:
if username is None:
username = input('Login or email: ')
password = getpass.getpass('Password for %s: ' % username)
if username is None or password is None:
raise Exception('A user name and password are required')
try:
resp = self.sendRestRequest('get', 'user/authentication', auth=(username, password),
headers={'Girder-Token': None})
except HttpError as e:
if e.status in (401, 403):
raise AuthenticationError()
raise
self.setToken(resp['authToken']['token'])
return resp['user']
def setToken(self, token):
"""
Set a token on the GirderClient instance. This is useful in the case
where the client has already been given a valid token, such as a remote job.
:param token: A string containing the existing Girder token
"""
self.token = token
def getServerVersion(self, useCached=True):
"""
Fetch server API version. By default, caches the version
such that future calls to this function do not make another request to
the server.
:param useCached: Whether to return the previously fetched value. Set
to False to force a re-fetch of the version from the server.
:type useCached: bool
:return: The API version as a list (e.g. ``['1', '0', '0']``)
"""
if not self._serverVersion or not useCached:
response = self.get('system/version')
if 'release' in response:
release = response['release'] # girder >= 3
else:
release = response['apiVersion'] # girder < 3
# Do not include any more than 3 version components in the patch version
self._serverVersion = release.split('.', 2)
return self._serverVersion
def getServerAPIDescription(self, useCached=True):
"""
Fetch server RESTful API description.
:param useCached: Whether to return the previously fetched value. Set
to False to force a re-fetch of the description from the server.
:type useCached: bool
:return: The API descriptions as a dict.
For example: ::
{
"basePath": "/api/v1",
"definitions": {},
"host": "girder.example.com",
"info": {
"title": "Girder REST API",
"version": "X.Y.Z"
},
"paths": {
"/api_key": {
"get": {
"description": "Only site administrators [...]",
"operationId": "api_key_listKeys",
"parameters": [
{
"description": "ID of the user whose keys to list.",
"in": "query",
"name": "userId",
"required": false,
"type": "string"
},
...
]
}.
...
}
...
}
}
"""
if not self._serverApiDescription or not useCached:
self._serverApiDescription = self.get('describe')
return self._serverApiDescription
def _requestFunc(self, method):
if self._session is not None:
return getattr(self._session, method.lower())
else:
return getattr(requests, method.lower())
def sendRestRequest(self, method, path, parameters=None,
data=None, files=None, json=None, headers=None, jsonResp=True,
**kwargs):
"""
This method looks up the appropriate method, constructs a request URL
from the base URL, path, and parameters, and then sends the request. If
the method is unknown or if the path is not found, an exception is
raised, otherwise a JSON object is returned with the Girder response.
This is a convenience method to use when making basic requests that do
not involve multipart file data that might need to be specially encoded
or handled differently.
:param method: The HTTP method to use in the request (GET, POST, etc.)
:type method: str
:param path: A string containing the path elements for this request.
Note that the path string should not begin or end with the path separator, '/'.
:type path: str
:param parameters: A dictionary mapping strings to strings, to be used
as the key/value pairs in the request parameters.
:type parameters: dict
:param data: A dictionary, bytes or file-like object to send in the body.
:param files: A dictionary of 'name' => file-like-objects for multipart encoding upload.
:type files: dict
:param json: A JSON object to send in the request body.
:type json: dict
:param headers: If present, a dictionary of headers to encode in the request.
:type headers: dict
:param jsonResp: Whether the response should be parsed as JSON. If False, the raw
response object is returned. To get the raw binary content of the response,
use the ``content`` attribute of the return value, e.g.
.. code-block:: python
resp = client.get('my/endpoint', jsonResp=False)
print(resp.content) # Raw binary content
print(resp.headers) # Dict of headers
:type jsonResp: bool
"""
if not parameters:
parameters = {}
# Look up the HTTP method we need
f = self._requestFunc(method)
# Construct the url
url = self.urlBase + path
# Make the request, passing parameters and authentication info
_headers = {'Girder-Token': self.token}
if isinstance(headers, dict):
_headers.update(headers)
result = f(
url, params=parameters, data=data, files=files, json=json, headers=_headers,
**kwargs)
# If success, return the json object. Otherwise throw an exception.
if result.ok:
if jsonResp:
return result.json()
else:
return result
else:
raise HttpError(
status=result.status_code, url=result.url, method=method, text=result.text,
response=result)
def get(self, path, parameters=None, jsonResp=True):
"""
Convenience method to call :py:func:`sendRestRequest` with the 'GET' HTTP method.
"""
return self.sendRestRequest('GET', path, parameters, jsonResp=jsonResp)
def post(self, path, parameters=None, files=None, data=None, json=None, headers=None,
jsonResp=True):
"""
Convenience method to call :py:func:`sendRestRequest` with the 'POST' HTTP method.
"""
return self.sendRestRequest('POST', path, parameters, files=files,
data=data, json=json, headers=headers, jsonResp=jsonResp)
def put(self, path, parameters=None, data=None, json=None, jsonResp=True):
"""
Convenience method to call :py:func:`sendRestRequest` with the 'PUT'
HTTP method.
"""
return self.sendRestRequest('PUT', path, parameters, data=data,
json=json, jsonResp=jsonResp)
def delete(self, path, parameters=None, jsonResp=True):
"""
Convenience method to call :py:func:`sendRestRequest` with the 'DELETE' HTTP method.
"""
return self.sendRestRequest('DELETE', path, parameters, jsonResp=jsonResp)
def patch(self, path, parameters=None, data=None, json=None, jsonResp=True):
"""
Convenience method to call :py:func:`sendRestRequest` with the 'PATCH' HTTP method.
"""
return self.sendRestRequest('PATCH', path, parameters, data=data,
json=json, jsonResp=jsonResp)
def createResource(self, path, params):
"""
Creates and returns a resource.
"""
return self.post(path, params)
def getResource(self, path, id=None, property=None):
"""
Returns a resource based on ``id`` or None if no resource is found; if
``property`` is passed, returns that property value from the found resource.
"""
route = path
if id is not None:
route += '/%s' % id
if property is not None:
route += '/%s' % property
return self.get(route)
def resourceLookup(self, path):
"""
Look up and retrieve resource in the data hierarchy by path.
:param path: The path of the resource. The path must be an absolute
Unix path starting with either "/user/[user name]" or
"/collection/[collection name]".
"""
return self.get('resource/lookup', parameters={'path': path})
def listResource(self, path, params=None, limit=None, offset=None):
"""
This is a generator that will yield records using the given path and
params until exhausted. Paging of the records is done internally, but
can be overridden by manually passing a ``limit`` value to select only
a single page. Passing an ``offset`` will work in both single-page and
exhaustive modes.
"""
params = dict(params or {})
params['offset'] = offset or 0
params['limit'] = limit if limit is not None else DEFAULT_PAGE_LIMIT
while True:
records = self.get(path, params)
for record in records:
yield record
n = len(records)
if limit or n < params['limit']:
# Either a single slice was requested, or this is the last page
break
params['offset'] += n
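# Hedged usage sketch (editor's note): listResource backs all of the list* helpers, e.g.
#   for item in client.listResource('item', {'folderId': folder_id}):
#       ...                       # pages of DEFAULT_PAGE_LIMIT are fetched transparently
#   first_page = list(client.listResource('item', {'folderId': folder_id}, limit=10))
# Passing limit selects a single page; otherwise the generator keeps paging until exhausted.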
def setResourceTimestamp(self, id, type, created=None, updated=None):
"""
Set the created or updated timestamps for a resource.
"""
url = 'resource/%s/timestamp' % id
params = {
'type': type,
}
if created:
params['created'] = str(created)
if updated:
params['updated'] = str(updated)
return self.put(url, parameters=params)
def getFile(self, fileId):
"""
Retrieves a file by its ID.
:param fileId: A string containing the ID of the file to retrieve from Girder.
"""
return self.getResource('file', fileId)
def listFile(self, itemId, limit=None, offset=None):
"""
This is a generator that will yield files under the given itemId.
:param itemId: the item's ID
:param limit: the result set size limit.
:param offset: the result offset.
"""
return self.listResource('item/%s/files' % itemId, params={
'id': itemId,
}, limit=limit, offset=offset)
def createItem(self, parentFolderId, name, description='', reuseExisting=False,
metadata=None):
"""
Creates and returns an item.
:param parentFolderId: the folder this item should be created in.
:param name: the item name.
:param description: a description of the item.
:param reuseExisting: whether to return an existing item if one with
same name already exists.
:param metadata: JSON metadata to set on item.
"""
if metadata is not None and not isinstance(metadata, str):
metadata = json.dumps(metadata)
params = {
'folderId': parentFolderId,
'name': name,
'description': description,
'reuseExisting': reuseExisting,
'metadata': metadata
}
return self.createResource('item', params)
def getItem(self, itemId):
"""
Retrieves an item by its ID.
:param itemId: A string containing the ID of the item to retrieve from Girder.
"""
return self.getResource('item', itemId)
def listItem(self, folderId, text=None, name=None, limit=None, offset=None):
"""
This is a generator that will yield all items under a given folder.
:param folderId: the parent folder's ID.
:param text: query for full text search of items.
:param name: query for exact name match of items.
:param limit: If requesting a specific slice, the length of the slice.
:param offset: Starting offset into the list.
"""
params = {
'folderId': folderId
}
if text:
params['text'] = text
if name:
params['name'] = name
return self.listResource('item', params, limit=limit, offset=offset)
def listUser(self, limit=None, offset=None):
"""
This is a generator that will yield all users in the system.
:param limit: If requesting a specific slice, the length of the slice.
:param offset: Starting offset into the list.
"""
return self.listResource('user', limit=limit, offset=offset)
def getUser(self, userId):
"""
Retrieves a user by its ID.
:param userId: A string containing the ID of the user to
retrieve from Girder.
"""
return self.getResource('user', userId)
def createUser(self, login, email, firstName, lastName, password, admin=None):
"""
Creates and returns a user.
"""
params = {
'login': login,
'email': email,
'firstName': firstName,
'lastName': lastName,
'password': password
}
if admin is not None:
params['admin'] = admin
return self.createResource('user', params)
def listCollection(self, limit=None, offset=None):
"""
This is a generator that will yield all collections in the system.
:param limit: If requesting a specific slice, the length of the slice.
:param offset: Starting offset into the list.
"""
return self.listResource('collection', limit=limit, offset=offset)
def getCollection(self, collectionId):
"""
Retrieves a collection by its ID.
:param collectionId: A string containing the ID of the collection to
retrieve from Girder.
"""
return self.getResource('collection', collectionId)
def createCollection(self, name, description='', public=False):
"""
Creates and returns a collection.
"""
params = {
'name': name,
'description': description,
'public': public
}
return self.createResource('collection', params)
def createFolder(self, parentId, name, description='', parentType='folder',
public=None, reuseExisting=False, metadata=None):
"""
Creates and returns a folder.
:param parentId: The id of the parent resource to create the folder in.
:param name: The name of the folder.
:param description: A description of the folder.
:param parentType: One of ('folder', 'user', 'collection')
:param public: Whether the folder should be marked as public.
:param reuseExisting: Whether to return an existing folder if one with
the same name exists.
:param metadata: JSON metadata to set on the folder.
"""
if metadata is not None and not isinstance(metadata, str):
metadata = json.dumps(metadata)
params = {
'parentId': parentId,
'parentType': parentType,
'name': name,
'description': description,
'reuseExisting': reuseExisting,
'metadata': metadata
}
if public is not None:
params['public'] = public
return self.createResource('folder', params)
def getFolder(self, folderId):
"""
Retrieves a folder by its ID.
:param folderId: A string containing the ID of the folder to retrieve from Girder.
"""
return self.getResource('folder', folderId)
def listFolder(self, parentId, parentFolderType='folder', name=None,
limit=None, offset=None):
"""
This is a generator that will yield a list of folders based on the filter parameters.
:param parentId: The parent's ID.
:param parentFolderType: One of ('folder', 'user', 'collection').
:param name: query for exact name match of folders.
:param limit: If requesting a specific slice, the length of the slice.
:param offset: Starting offset into the list.
"""
params = {
'parentId': parentId,
'parentType': parentFolderType
}
if name:
params['name'] = name
return self.listResource('folder', params, limit=limit, offset=offset)
def getFolderAccess(self, folderId):
"""
Retrieves a folder's access by its ID.
:param folderId: A string containing the ID of the folder to retrieve
access for from Girder.
"""
return self.getResource('folder', folderId, 'access')
def setFolderAccess(self, folderId, access, public):
"""
Sets the passed in access control document along with the public value
to the target folder.
:param folderId: Id of the target folder.
:param access: JSON document specifying access control.
:param public: Boolean specifying the public value.
"""
if access is not None and not isinstance(access, str):
access = json.dumps(access)
path = 'folder/' + folderId + '/access'
params = {
'access': access,
'public': public
}
return self.put(path, params)
def isFileCurrent(self, itemId, filename, filepath):
"""
Tests whether the file at filepath already exists in the item with itemId,
under the name filename and with the same length. Returns a tuple
(file_id, current), where file_id is the id of the file with that filename
under the item (or None if no such file exists under the item), and
current is a boolean indicating whether that file has the same size as
the file at filepath.
:param itemId: ID of parent item for file.
:param filename: name of file to look for under the parent item.
:param filepath: path to file on disk.
"""
itemFiles = self.listFile(itemId)
for itemFile in itemFiles:
if filename == itemFile['name']:
file_id = itemFile['_id']
size = os.path.getsize(filepath)
return (file_id, size == itemFile['size'])
# Some files may already be stored under a different name, we'll need
# to upload anyway in this case also.
return (None, False)
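# Hedged example (editor's note): callers typically branch on the returned tuple, e.g.
#   file_id, current = client.isFileCurrent(item_id, 'data.csv', '/tmp/data.csv')
#   if file_id is None or not current:
#       client.uploadFileToItem(item_id, '/tmp/data.csv')
# Only name and size are compared, so same-size edits are not detected.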
def uploadFileToItem(self, itemId, filepath, reference=None, mimeType=None, filename=None,
progressCallback=None):
"""
Uploads a file to an item, in chunks.
If the file already exists in the item with the same name and size,
or if the file has 0 bytes, no uploading will be performed.
:param itemId: ID of parent item for file.
:param filepath: path to file on disk.
:param reference: optional reference to send along with the upload.
:type reference: str
:param mimeType: MIME type for the file. Will be guessed if not passed.
:type mimeType: str or None
:param filename: path with filename used in Girder. Defaults to basename of filepath.
:param progressCallback: If passed, will be called after each chunk
with progress information. It passes a single positional argument
to the callable which is a dict of information about progress.
:type progressCallback: callable
:returns: the file that was created.
"""
if filename is None:
filename = filepath
filename = os.path.basename(filename)
filepath = os.path.abspath(filepath)
filesize = os.path.getsize(filepath)
# Check if the file already exists by name and size in the file.
fileId, current = self.isFileCurrent(itemId, filename, filepath)
if fileId is not None and current:
print('File %s already exists in parent Item' % filename)
return
if fileId is not None and not current:
print('File %s exists in item, but with stale contents' % filename)
path = 'file/%s/contents' % fileId
params = {
'size': filesize
}
if reference:
params['reference'] = reference
obj = self.put(path, params)
if '_id' not in obj:
raise Exception(
'After creating an upload token for replacing file '
'contents, expected an object with an id. Got instead: ' + json.dumps(obj))
else:
if mimeType is None:
# Attempt to guess MIME type if not passed explicitly
mimeType, _ = mimetypes.guess_type(filepath)
params = {
'parentType': 'item',
'parentId': itemId,
'name': filename,
'size': filesize,
'mimeType': mimeType
}
if reference:
params['reference'] = reference
obj = self.post('file', params)
if '_id' not in obj:
raise Exception(
'After creating an upload token for a new file, expected '
'an object with an id. Got instead: ' + json.dumps(obj))
with open(filepath, 'rb') as f:
return self._uploadContents(obj, f, filesize, progressCallback=progressCallback)
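# Hedged usage sketch (editor's note):
#   item = client.createItem(folder_id, 'logs', 'nightly logs')
#   client.uploadFileToItem(item['_id'], '/tmp/run.log',
#                           progressCallback=lambda p: print(p['current'], '/', p['total']))
# The callback receives a dict with 'current' and 'total' byte counts after every chunk.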
def uploadStreamToFolder(self, folderId, stream, filename, size, reference=None, mimeType=None,
progressCallback=None):
"""
Uploads a file-like object to a folder, creating a new item in the process. If
the file has 0 bytes, no uploading will be performed, and no item will
be created.
:param folderId: ID of parent folder for file.
:param stream: Readable stream object.
:param filename: Filename used for Girder only.
:param size: The length of the file. This must be exactly equal to the
total number of bytes that will be read from ``stream``, otherwise
the upload will fail.
:param reference: optional reference to send along with the upload.
:param mimeType: MIME type for the file.
:param progressCallback: If passed, will be called after each chunk
with progress information. It passes a single positional argument
to the callable which is a dict of information about progress.
"""
params = {
'parentType': 'folder',
'parentId': folderId,
'name': filename,
'size': size,
'mimeType': mimeType
}
if reference:
params['reference'] = reference
if size <= self.MAX_CHUNK_SIZE and self.getServerVersion() >= ['2', '3']:
chunk = stream.read(size)
if isinstance(chunk, str):
chunk = chunk.encode('utf8')
with self.progressReporterCls(label=filename, length=size) as reporter:
return self.post(
'file', params, data=_ProgressBytesIO(chunk, reporter=reporter))
obj = self.post('file', params)
if '_id' not in obj:
raise Exception(
'After creating an upload token for a new file, expected '
'an object with an id. Got instead: ' + json.dumps(obj))
return self._uploadContents(obj, stream, size, progressCallback=progressCallback)
def uploadFileToFolder(self, folderId, filepath, reference=None, mimeType=None, filename=None,
progressCallback=None):
"""
Uploads a file to a folder, creating a new item in the process. If
the file has 0 bytes, no uploading will be performed, and no item will
be created.
:param folderId: ID of parent folder for file.
:param filepath: path to file on disk.
:param reference: optional reference to send along with the upload.
:type reference: str
:param mimeType: MIME type for the file. Will be guessed if not passed.
:type mimeType: str or None
:param filename: path with filename used in Girder. Defaults to basename of filepath.
:param progressCallback: If passed, will be called after each chunk
with progress information. It passes a single positional argument
to the callable which is a dict of information about progress.
:type progressCallback: callable
:returns: the file that was created.
"""
if filename is None:
filename = filepath
filename = os.path.basename(filename)
filepath = os.path.abspath(filepath)
filesize = os.path.getsize(filepath)
if mimeType is None:
# Attempt to guess MIME type if not passed explicitly
mimeType, _ = mimetypes.guess_type(filepath)
with open(filepath, 'rb') as f:
return self.uploadStreamToFolder(folderId, f, filename, filesize, reference, mimeType,
progressCallback)
def _uploadContents(self, uploadObj, stream, size, progressCallback=None):
"""
Uploads contents of a file.
:param uploadObj: The upload object contain the upload id.
:type uploadObj: dict
:param stream: Readable stream object.
:type stream: file-like
:param size: The length of the file. This must be exactly equal to the
total number of bytes that will be read from ``stream``, otherwise
the upload will fail.
:type size: int
:param progressCallback: If passed, will be called after each chunk
with progress information. It passes a single positional argument
to the callable which is a dict of information about progress.
:type progressCallback: callable
"""
offset = 0
uploadId = uploadObj['_id']
with self.progressReporterCls(label=uploadObj.get('name', ''), length=size) as reporter:
while True:
chunk = stream.read(min(self.MAX_CHUNK_SIZE, (size - offset)))
if not chunk:
break
if isinstance(chunk, str):
chunk = chunk.encode('utf8')
uploadObj = self.post(
'file/chunk?offset=%d&uploadId=%s' % (offset, uploadId),
data=_ProgressBytesIO(chunk, reporter=reporter))
if '_id' not in uploadObj:
raise Exception(
'After uploading a file chunk, did not receive object with _id. '
'Got instead: ' + json.dumps(uploadObj))
offset += len(chunk)
if callable(progressCallback):
progressCallback({
'current': offset,
'total': size
})
if offset != size:
self.delete('file/upload/' + uploadId)
raise IncorrectUploadLengthError(
'Expected upload to be %d bytes, but received %d.' % (size, offset),
upload=uploadObj)
return uploadObj
def uploadFile(self, parentId, stream, name, size, parentType='item',
progressCallback=None, reference=None, mimeType=None):
"""
Uploads a file into an item or folder.
:param parentId: The ID of the folder or item to upload into.
:type parentId: str
:param stream: Readable stream object.
:type stream: file-like
:param name: The name of the file to create.
:type name: str
:param size: The length of the file. This must be exactly equal to the
total number of bytes that will be read from ``stream``, otherwise
the upload will fail.
:type size: int
:param parentType: 'item' or 'folder'.
:type parentType: str
:param progressCallback: If passed, will be called after each chunk
with progress information. It passes a single positional argument
to the callable which is a dict of information about progress.
:type progressCallback: callable
:param reference: optional reference to send along with the upload.
:type reference: str
:param mimeType: MIME type to set on the file. Attempts to guess if not
explicitly passed.
:type mimeType: str or None
:returns: The file that was created on the server.
"""
params = {
'parentType': parentType,
'parentId': parentId,
'name': name,
'size': size,
'mimeType': mimeType or mimetypes.guess_type(name)[0]
}
if reference is not None:
params['reference'] = reference
obj = self.post('file', params)
if '_id' not in obj:
raise Exception(
'After creating an upload token for a new file, expected '
'an object with an id. Got instead: ' + json.dumps(obj))
return self._uploadContents(obj, stream, size, progressCallback=progressCallback)
def uploadFileContents(self, fileId, stream, size, reference=None):
"""
Uploads the contents of an existing file.
:param fileId: ID of file to update
:param stream: Readable stream object.
:type stream: file-like
:param size: The length of the file. This must be exactly equal to the
total number of bytes that will be read from ``stream``, otherwise
the upload will fail.
:type size: int
:param reference: optional reference to send along with the upload.
:type reference: str
"""
path = 'file/%s/contents' % fileId
params = {
'size': size
}
if reference:
params['reference'] = reference
obj = self.put(path, params)
if '_id' not in obj:
raise Exception(
'After creating an upload token for replacing file '
'contents, expected an object with an id. Got instead: ' + json.dumps(obj))
return self._uploadContents(obj, stream, size)
def addMetadataToItem(self, itemId, metadata):
"""
Takes an item ID and a dictionary containing the metadata
:param itemId: ID of the item to set metadata on.
:param metadata: dictionary of metadata to set on item.
"""
path = 'item/' + itemId + '/metadata'
obj = self.put(path, json=metadata)
return obj
def addMetadataToFolder(self, folderId, metadata):
"""
Takes a folder ID and a dictionary containing the metadata
:param folderId: ID of the folder to set metadata on.
:param metadata: dictionary of metadata to set on folder.
"""
path = 'folder/' + folderId + '/metadata'
obj = self.put(path, json=metadata)
return obj
def addMetadataToCollection(self, collectionId, metadata):
"""
Takes a collection ID and a dictionary containing the metadata
:param collectionId: ID of the collection to set metadata on.
:param metadata: dictionary of metadata to set on collection.
"""
path = 'collection/' + collectionId + '/metadata'
obj = self.put(path, json=metadata)
return obj
def transformFilename(self, name):
"""
Sanitize a resource name from Girder into a name that is safe to use
as a filesystem path.
:param name: The name to transform.
:type name: str
"""
if name in ('.', '..'):
name = '_' + name
name = name.replace(os.path.sep, '_')
if os.path.altsep:
name = name.replace(os.path.altsep, '_')
return _safeNameRegex.sub('_', name)
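# Hedged examples (editor's note), assuming a POSIX path separator:
#   client.transformFilename('my/report: v1')  ->  'my_report: v1'
#   client.transformFilename('..')             ->  '_..'
# A leading run of slashes or backslashes is collapsed to a single underscore by _safeNameRegex.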
def _copyFile(self, fp, path):
"""
Copy the `fp` file-like object to `path` which may be a filename string
or another file-like object to write to.
"""
if isinstance(path, str):
# Use "abspath" to cleanly get the parent of ".", without following symlinks otherwise
os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
with open(path, 'wb') as dst:
shutil.copyfileobj(fp, dst)
else:
# assume `path` is a file-like object
shutil.copyfileobj(fp, path)
def _streamingFileDownload(self, fileId):
"""
Download a file streaming the contents
:param fileId: The ID of the Girder file to download.
:returns: The request
"""
path = 'file/%s/download' % fileId
return self.sendRestRequest('get', path, stream=True, jsonResp=False)
def downloadFile(self, fileId, path, created=None):
"""
Download a file to the given local path or file-like object.
:param fileId: The ID of the Girder file to download.
:param path: The path to write the file to, or a file-like object.
"""
fileObj = self.getFile(fileId)
created = created or fileObj['created']
cacheKey = '\n'.join([self.urlBase, fileId, created])
# see if file is in local cache
if self.cache is not None:
fp = self.cache.get(cacheKey, read=True)
if fp:
with fp:
self._copyFile(fp, path)
return
# download to a tempfile
progressFileName = fileId
if isinstance(path, str):
progressFileName = os.path.basename(path)
req = self._streamingFileDownload(fileId)
with tempfile.NamedTemporaryFile(delete=False) as tmp:
with self.progressReporterCls(
label=progressFileName,
length=int(req.headers.get('content-length', 0))) as reporter:
for chunk in req.iter_content(chunk_size=REQ_BUFFER_SIZE):
reporter.update(len(chunk))
tmp.write(chunk)
size = os.stat(tmp.name).st_size
if size != fileObj['size']:
os.remove(tmp.name)
raise IncompleteResponseError('File %s download' % fileId, fileObj['size'], size)
# save file in cache
if self.cache is not None:
with open(tmp.name, 'rb') as fp:
self.cache.set(cacheKey, fp, read=True)
if isinstance(path, str):
# we can just rename the tempfile
# Use "abspath" to cleanly get the parent of ".", without following symlinks otherwise
os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
shutil.move(tmp.name, path)
else:
# write to file-like object
with open(tmp.name, 'rb') as fp:
shutil.copyfileobj(fp, path)
# delete the temp file
os.remove(tmp.name)
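# Hedged usage sketch (editor's note): path may be a filename or a writable file-like object.
#   client.downloadFile(file_id, '/tmp/out/data.bin')   # parent directories are created as needed
#   buf = io.BytesIO()
#   client.downloadFile(file_id, buf)                   # downloaded via a tempfile, then copied into buf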
def downloadFileAsIterator(self, fileId, chunkSize=REQ_BUFFER_SIZE):
"""
Download a file streaming the contents as an iterator.
:param fileId: The ID of the Girder file to download.
:param chunkSize: The chunk size to download the contents in.
:returns: The request content iterator.
"""
req = self._streamingFileDownload(fileId)
return req.iter_content(chunk_size=chunkSize)
def downloadItem(self, itemId, dest, name=None):
"""
Download an item from Girder into a local folder. Each file in the
item will be placed into the directory specified by the dest parameter.
If the item contains multiple files or a single file with a different
name than the item, the item will be created as a directory under dest
and the files will become files within that directory.
:param itemId: The Id of the Girder item to download.
:param dest: The destination directory to write the item into.
:param name: If the item name is known in advance, you may pass it here
which will save a lookup to the server.
"""
if name is None:
item = self.get('item/' + itemId)
name = item['name']
offset = 0
first = True
while True:
files = self.get('item/%s/files' % itemId, parameters={
'limit': DEFAULT_PAGE_LIMIT,
'offset': offset
})
if first:
if len(files) == 1 and files[0]['name'] == name:
self.downloadFile(
files[0]['_id'],
os.path.join(dest, self.transformFilename(name)),
created=files[0]['created'])
break
else:
dest = os.path.join(dest, self.transformFilename(name))
os.makedirs(dest, exist_ok=True)
for file in files:
self.downloadFile(
file['_id'],
os.path.join(dest, self.transformFilename(file['name'])),
created=file['created'])
first = False
offset += len(files)
if len(files) < DEFAULT_PAGE_LIMIT:
break
def downloadFolderRecursive(self, folderId, dest, sync=False):
"""
Download a folder recursively from Girder into a local directory.
:param folderId: Id of the Girder folder or resource path to download.
:type folderId: ObjectId or Unix-style path to the resource in Girder.
:param dest: The local download destination.
:type dest: str
:param sync: If True, check if item exists in local metadata
cache and skip download provided that metadata is identical.
:type sync: bool
"""
offset = 0
folderId = self._checkResourcePath(folderId)
while True:
folders = self.get('folder', parameters={
'limit': DEFAULT_PAGE_LIMIT,
'offset': offset,
'parentType': 'folder',
'parentId': folderId
})
for folder in folders:
local = os.path.join(dest, self.transformFilename(folder['name']))
os.makedirs(local, exist_ok=True)
self.downloadFolderRecursive(folder['_id'], local, sync=sync)
offset += len(folders)
if len(folders) < DEFAULT_PAGE_LIMIT:
break
offset = 0
while True:
items = self.get('item', parameters={
'folderId': folderId,
'limit': DEFAULT_PAGE_LIMIT,
'offset': offset
})
for item in items:
_id = item['_id']
self.incomingMetadata[_id] = item
if sync and _id in self.localMetadata and item == self.localMetadata[_id]:
continue
self.downloadItem(item['_id'], dest, name=item['name'])
offset += len(items)
if len(items) < DEFAULT_PAGE_LIMIT:
break
def downloadResource(self, resourceId, dest, resourceType='folder', sync=False):
"""
Download a collection, user, or folder recursively from Girder into a local directory.
:param resourceId: ID or path of the resource to download.
:type resourceId: ObjectId or Unix-style path to the resource in Girder.
:param dest: The local download destination. Can be an absolute path or relative to
the current working directory.
:type dest: str
:param resourceType: The type of resource being downloaded: 'collection', 'user',
or 'folder'.
:type resourceType: str
:param sync: If True, check if items exist in local metadata
cache and skip download if the metadata is identical.
:type sync: bool
"""
if resourceType == 'folder':
self.downloadFolderRecursive(resourceId, dest, sync)
elif resourceType in ('collection', 'user'):
offset = 0
resourceId = self._checkResourcePath(resourceId)
while True:
folders = self.get('folder', parameters={
'limit': DEFAULT_PAGE_LIMIT,
'offset': offset,
'parentType': resourceType,
'parentId': resourceId
})
for folder in folders:
local = os.path.join(dest, self.transformFilename(folder['name']))
os.makedirs(local, exist_ok=True)
self.downloadFolderRecursive(folder['_id'], local, sync=sync)
offset += len(folders)
if len(folders) < DEFAULT_PAGE_LIMIT:
break
else:
raise Exception('Invalid resource type: %s' % resourceType)
def saveLocalMetadata(self, dest):
"""
Dumps item metadata collected during a folder download.
:param dest: The local download destination.
"""
with open(os.path.join(dest, '.girder_metadata'), 'w') as fh:
fh.write(json.dumps(self.incomingMetadata))
def loadLocalMetadata(self, dest):
"""
Reads item metadata from a local folder.
:param dest: The local download destination.
"""
try:
with open(os.path.join(dest, '.girder_metadata'), 'r') as fh:
self.localMetadata = json.loads(fh.read())
except OSError:
print('Local metadata does not exist. Falling back to download.')
def inheritAccessControlRecursive(self, ancestorFolderId, access=None, public=None):
"""
Take the access control and public value of a folder and recursively
copy that access control and public value to all folder descendants,
replacing any existing access control on the descendant folders with
that of the ancestor folder.
:param ancestorFolderId: Id of the Girder folder to copy access
control from, to all of its descendant folders.
:param access: Dictionary Access control target, if None, will take
existing access control of ancestor folder
:param public: Boolean public value target, if None, will take existing
public value of ancestor folder
"""
offset = 0
if public is None:
public = self.getFolder(ancestorFolderId)['public']
if access is None:
access = self.getFolderAccess(ancestorFolderId)
while True:
self.setFolderAccess(ancestorFolderId, json.dumps(access), public)
folders = self.get('folder', parameters={
'limit': DEFAULT_PAGE_LIMIT,
'offset': offset,
'parentType': 'folder',
'parentId': ancestorFolderId
})
for folder in folders:
self.inheritAccessControlRecursive(folder['_id'], access, public)
offset += len(folders)
if len(folders) < DEFAULT_PAGE_LIMIT:
break
def addFolderUploadCallback(self, callback):
"""Saves a passed in callback function that will be called after each
folder has completed. Multiple callback functions can be added, they
will be called in the order they were added by calling this function.
Callback functions will be called after a folder in Girder is created
and all subfolders and items for that folder have completed uploading.
Callback functions should take two parameters:
- the folder in Girder
- the full path to the local folder
:param callback: callback function to be called.
"""
self._folderUploadCallbacks.append(callback)
def addItemUploadCallback(self, callback):
"""Saves a passed in callback function that will be called after each
item has completed. Multiple callback functions can be added, they
will be called in the order they were added by calling this function.
Callback functions will be called after an item in Girder is created
and all files for that item have been uploaded. Callback functions
should take two parameters:
- the item in Girder
- the full path to the local folder or file comprising the item
:param callback: callback function to be called.
"""
self._itemUploadCallbacks.append(callback)
def loadOrCreateFolder(self, folderName, parentId, parentType, metadata=None):
"""Returns a folder in Girder with the given name under the given
parent. If none exists yet, it will create it and return it.
:param folderName: the name of the folder to look up.
:param parentId: id of parent in Girder
:param parentType: one of (collection, folder, user)
:param metadata: JSON metadata string to set on folder.
:returns: The folder that was found or created.
"""
children = self.listFolder(parentId, parentType, name=folderName)
try:
return next(children)
except StopIteration:
return self.createFolder(parentId, folderName, parentType=parentType,
metadata=metadata)
def _hasOnlyFiles(self, localFolder):
"""Returns whether a folder has only files. This will be false if the
folder contains any subdirectories.
:param localFolder: full path to the local folder
"""
return not any(os.path.isdir(os.path.join(localFolder, entry))
for entry in os.listdir(localFolder))
def loadOrCreateItem(self, name, parentFolderId, reuseExisting=True, metadata=None):
"""Create an item with the given name in the given parent folder.
:param name: The name of the item to load or create.
:param parentFolderId: id of parent folder in Girder
:param reuseExisting: boolean indicating whether to load an existing
item of the same name in the same location, or create a new one.
:param metadata: JSON metadata string to set on item.
"""
item = None
if reuseExisting:
children = self.listItem(parentFolderId, name=name)
try:
item = next(children)
except StopIteration:
pass
if item is None:
item = self.createItem(parentFolderId, name, description='', metadata=metadata)
return item
def _uploadAsItem(self, localFile, parentFolderId, filePath, reuseExisting=False, dryRun=False,
reference=None):
"""Function for doing an upload of a file as an item.
:param localFile: name of local file to upload
:param parentFolderId: id of parent folder in Girder
:param filePath: full path to the file
:param reuseExisting: boolean indicating whether to accept an existing item
of the same name in the same location, or create a new one instead
:param reference: Optional reference to send along with the upload.
"""
if not self.progressReporterCls.reportProgress:
print('Uploading Item from %s' % localFile)
if not dryRun:
# If we are reusing existing items or have upload callbacks, then
# we need to know the item as part of the process. If this is a
# zero-length file, we create an item. Otherwise, we can just
# upload to the parent folder and never learn about the created
# item.
if reuseExisting or len(self._itemUploadCallbacks) or os.path.getsize(filePath) == 0:
currentItem = self.loadOrCreateItem(
os.path.basename(localFile), parentFolderId, reuseExisting)
self.uploadFileToItem(
currentItem['_id'], filePath, filename=localFile, reference=reference)
for callback in self._itemUploadCallbacks:
callback(currentItem, filePath)
else:
self.uploadFileToFolder(
parentFolderId, filePath, filename=localFile, reference=reference)
def _uploadFolderAsItem(self, localFolder, parentFolderId, reuseExisting=False, blacklist=None,
dryRun=False, reference=None):
"""
Take a folder and use its base name as the name of a new item. Then,
upload its containing files into the new item as bitstreams.
:param localFolder: The path to the folder to be uploaded.
:param parentFolderId: Id of the destination folder for the new item.
:param reuseExisting: boolean indicating whether to accept an existing item
of the same name in the same location, or create a new one instead
:param reference: Optional reference to send along with the upload.
"""
blacklist = blacklist or []
print('Creating Item from folder %s' % localFolder)
if not dryRun:
item = self.loadOrCreateItem(
os.path.basename(localFolder), parentFolderId, reuseExisting)
subdircontents = sorted(os.listdir(localFolder))
# for each file in the subdir, add it to the item
filecount = len(subdircontents)
for (ind, currentFile) in enumerate(subdircontents):
filepath = os.path.join(localFolder, currentFile)
if currentFile in blacklist:
if dryRun:
print('Ignoring file %s as blacklisted' % currentFile)
continue
print('Adding file %s, (%d of %d) to Item' % (currentFile, ind + 1, filecount))
if not dryRun:
self.uploadFileToItem(item['_id'], filepath, filename=currentFile)
if not dryRun:
for callback in self._itemUploadCallbacks:
callback(item, localFolder)
def _uploadFolderRecursive(self, localFolder, parentId, parentType, leafFoldersAsItems=False,
reuseExisting=False, blacklist=None, dryRun=False, reference=None):
"""
Function to recursively upload a folder and all of its descendants.
:param localFolder: full path to local folder to be uploaded
:param parentId: id of parent in Girder, where new folder will be added
:param parentType: one of (collection, folder, user)
:param leafFoldersAsItems: whether leaf folders should have all
files uploaded as single items
:param reuseExisting: boolean indicating whether to accept an existing item
of the same name in the same location, or create a new one instead
:param reference: Optional reference to send along with the upload.
"""
blacklist = blacklist or []
if leafFoldersAsItems and self._hasOnlyFiles(localFolder):
if parentType != 'folder':
raise Exception(
('Attempting to upload a folder as an item under a %s. '
% parentType) + 'Items can only be added to folders.')
else:
self._uploadFolderAsItem(localFolder, parentId, reuseExisting, dryRun=dryRun)
else:
filename = os.path.basename(localFolder)
if filename in blacklist:
if dryRun:
print('Ignoring file %s as it is blacklisted' % filename)
return
print('Creating Folder from %s' % localFolder)
if dryRun:
# create a dry run placeholder
folder = {'_id': 'dryrun'}
else:
folder = self.loadOrCreateFolder(
os.path.basename(localFolder), parentId, parentType)
for entry in sorted(os.listdir(localFolder)):
if entry in blacklist:
if dryRun:
print('Ignoring file %s as it is blacklisted' % entry)
continue
fullEntry = os.path.join(localFolder, entry)
if os.path.islink(fullEntry):
# os.walk skips symlinks by default
print('Skipping file %s as it is a symlink' % entry)
continue
elif os.path.isdir(fullEntry):
# At this point we should have an actual folder, so can
# pass that as the parent_type
self._uploadFolderRecursive(
fullEntry, folder['_id'], 'folder', leafFoldersAsItems, reuseExisting,
blacklist=blacklist, dryRun=dryRun, reference=reference)
else:
self._uploadAsItem(
entry, folder['_id'], fullEntry, reuseExisting, dryRun=dryRun,
reference=reference)
if not dryRun:
for callback in self._folderUploadCallbacks:
callback(folder, localFolder)
def upload(self, filePattern, parentId, parentType='folder', leafFoldersAsItems=False,
reuseExisting=False, blacklist=None, dryRun=False, reference=None):
"""
Upload a pattern of files.
This will recursively walk down every tree in the file pattern to
create a hierarchy on the server under the parentId.
:param filePattern: a glob pattern for files that will be uploaded,
recursively copying any file folder structures. If this is a list
or tuple each item in it will be used in turn.
:type filePattern: str
:param parentId: Id of the parent in Girder or resource path.
:type parentId: ObjectId or Unix-style path to the resource in Girder.
:param parentType: one of (collection,folder,user), default of folder.
:type parentType: str
:param leafFoldersAsItems: bool whether leaf folders should have all
files uploaded as single items.
:type leafFoldersAsItems: bool
:param reuseExisting: bool whether to accept an existing item of
the same name in the same location, or create a new one instead.
        :type reuseExisting: bool
        :param blacklist: list of file or folder names to ignore during the upload.
        :type blacklist: list
        :param dryRun: Set this to True to print out what actions would be taken, but
            do not actually communicate with the server.
        :type dryRun: bool
        :param reference: Optional reference to send along with the upload.
:type reference: str
"""
filePatternList = filePattern if isinstance(filePattern, (list, tuple)) else [filePattern]
blacklist = blacklist or []
empty = True
parentId = self._checkResourcePath(parentId)
for pattern in filePatternList:
for currentFile in glob.iglob(pattern):
empty = False
currentFile = os.path.normpath(currentFile)
filename = os.path.basename(currentFile)
if filename in blacklist:
if dryRun:
print('Ignoring file %s as it is blacklisted' % filename)
continue
if os.path.isfile(currentFile):
if parentType != 'folder':
raise Exception(
'Attempting to upload an item under a %s. Items can only be added to '
'folders.' % parentType)
else:
self._uploadAsItem(
os.path.basename(currentFile), parentId, currentFile, reuseExisting,
dryRun=dryRun, reference=reference)
else:
self._uploadFolderRecursive(
currentFile, parentId, parentType, leafFoldersAsItems, reuseExisting,
blacklist=blacklist, dryRun=dryRun, reference=reference)
if empty:
print('No matching files: ' + repr(filePattern))
def _checkResourcePath(self, objId):
if isinstance(objId, str) and objId.startswith('/'):
try:
return self.resourceLookup(objId)['_id']
except requests.HTTPError:
return None
return objId
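# --- Illustrative sketch (not part of the client above) ---
# A minimal, standalone rendering of the glob-walk pattern that upload() uses:
# each match of the pattern is normalized, blacklisted basenames are skipped,
# plain files go to one callback and directories to another. The function and
# callback names here are hypothetical and exist only for illustration.
import glob
import os


def walk_upload_pattern(file_pattern, on_file, on_folder, blacklist=None):
    """Walk a glob pattern, dispatching files and folders to callbacks."""
    blacklist = blacklist or []
    matched = False
    for current in glob.iglob(file_pattern):
        matched = True
        current = os.path.normpath(current)
        if os.path.basename(current) in blacklist:
            continue
        if os.path.isfile(current):
            on_file(current)
        elif os.path.isdir(current):
            on_folder(current)
    if not matched:
        print('No matching files: ' + repr(file_pattern))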
| 40.139412
| 100
| 0.593593
|
df77c2bd2a2e55782729350d45ba0800710283a5
| 15,492
|
py
|
Python
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/calculators/calc_modulator.py
|
SiliconLabs/gecko_sdk
|
310814a9016b60a8012d50c62cc168a783ac102b
|
[
"Zlib"
] | 69
|
2021-12-16T01:34:09.000Z
|
2022-03-31T08:27:39.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/calculators/calc_modulator.py
|
SiliconLabs/gecko_sdk
|
310814a9016b60a8012d50c62cc168a783ac102b
|
[
"Zlib"
] | 6
|
2022-01-12T18:22:08.000Z
|
2022-03-25T10:19:27.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/calculators/calc_modulator.py
|
SiliconLabs/gecko_sdk
|
310814a9016b60a8012d50c62cc168a783ac102b
|
[
"Zlib"
] | 21
|
2021-12-20T09:05:45.000Z
|
2022-03-28T02:52:28.000Z
|
from pyradioconfig.parts.ocelot.calculators.calc_modulator import CALC_Modulator_Ocelot
from pycalcmodel.core.variable import ModelVariableFormat, CreateModelVariableEnum
from pyradioconfig.calculator_model_framework.Utils.LogMgr import LogMgr
from math import ceil,log2
from enum import Enum
class Calc_Modulator_Sol(CALC_Modulator_Ocelot):
def buildVariables(self, model):
super().buildVariables(model)
self._addModelVariable(model, 'ofdm_rate_hz', float, ModelVariableFormat.DECIMAL,
desc='OFDM rate for softmodem')
self._addModelVariable(model, 'softmodem_tx_interpolation1',int,ModelVariableFormat.DECIMAL,desc='interpolation rate 1 for softmodem TX')
self._addModelVariable(model, 'softmodem_tx_interpolation2', int, ModelVariableFormat.DECIMAL,
desc='interpolation rate 2 for softmodem TX')
self._addModelVariable(model, 'softmodem_modulator_select', Enum, ModelVariableFormat.DECIMAL,
desc='determines modulator path for softmodem PHYs')
model.vars.softmodem_modulator_select.var_enum = CreateModelVariableEnum(
'SoftmodemModSelEnum',
'List of softmodem modulator paths',
[['IQ_MOD',0,'OFDM modulator'],
['LEGACY_MOD',1,'Legacy modulator'],
['NONE',2,'Not using softmodem']])
self._addModelVariable(model, 'softmodem_txircal_params', int, ModelVariableFormat.DECIMAL,
desc='TX IRCal parameters [kt, int2ratio, int2gain]', is_array=True)
self._addModelVariable(model, 'softmodem_txircal_freq', int, ModelVariableFormat.DECIMAL,
desc='TX IRCal tone freq')
def calc_softmodem_modulator_select(self, model):
softmodem_modulation_type = model.vars.softmodem_modulation_type.value
if (softmodem_modulation_type == model.vars.softmodem_modulation_type.var_enum.SUN_OFDM) or \
(softmodem_modulation_type == model.vars.softmodem_modulation_type.var_enum.CW):
softmodem_modulator_select = model.vars.softmodem_modulator_select.var_enum.IQ_MOD
elif softmodem_modulation_type != model.vars.softmodem_modulation_type.var_enum.NONE:
softmodem_modulator_select = model.vars.softmodem_modulator_select.var_enum.LEGACY_MOD
else:
softmodem_modulator_select = model.vars.softmodem_modulator_select.var_enum.NONE
#Write the model var
model.vars.softmodem_modulator_select.value = softmodem_modulator_select
def calc_txmodsel_reg(self, model):
#Read in model vars
softmodem_modulator_select = model.vars.softmodem_modulator_select.value
if softmodem_modulator_select == model.vars.softmodem_modulator_select.var_enum.IQ_MOD:
txmodsel = 0
elif softmodem_modulator_select == model.vars.softmodem_modulator_select.var_enum.LEGACY_MOD:
txmodsel = 1
else:
txmodsel = 0
#Write the register
self._reg_write(model.vars.RAC_SOFTMCTRL_TXMODSEL, txmodsel)
def calc_softmodem_tx_interpolation(self, model):
#This method calculates the interpolation rates for softmodem PHYs
#Read in model vars
softmodem_modulation_type = model.vars.softmodem_modulation_type.value
ofdm_option = model.vars.ofdm_option.value
softmodem_modulator_select = model.vars.softmodem_modulator_select.value
#Only need to set interpolation values for OFDM MOD
if softmodem_modulator_select == model.vars.softmodem_modulator_select.var_enum.IQ_MOD:
if softmodem_modulation_type == model.vars.softmodem_modulation_type.var_enum.SUN_OFDM:
softmodem_tx_interpolation1 = 7 #Static for now
if ofdm_option == model.vars.ofdm_option.var_enum.OPT1:
softmodem_tx_interpolation2 = 2
elif ofdm_option == model.vars.ofdm_option.var_enum.OPT2:
softmodem_tx_interpolation2 = 4
elif ofdm_option == model.vars.ofdm_option.var_enum.OPT3:
softmodem_tx_interpolation2 = 8
else:
softmodem_tx_interpolation2 = 16
else:
softmodem_tx_interpolation1 = 1
softmodem_tx_interpolation2 = 5
else:
softmodem_tx_interpolation1 = 0
softmodem_tx_interpolation2 = 0
#Write the model vars
model.vars.softmodem_tx_interpolation1.value = softmodem_tx_interpolation1
model.vars.softmodem_tx_interpolation2.value = softmodem_tx_interpolation2
def calc_int1cfg_reg(self, model):
#This method calculates the int1cfg register fields
#Read in model vars
softmodem_modulator_select = model.vars.softmodem_modulator_select.value
softmodem_tx_interpolation1 = model.vars.softmodem_tx_interpolation1.value
# Set only when OFDM modulator is used
if softmodem_modulator_select == model.vars.softmodem_modulator_select.var_enum.IQ_MOD:
ratio = softmodem_tx_interpolation1-1
gainshift = 12 #Static for now
else:
ratio=0
gainshift=0
#Write the registers
self._reg_write(model.vars.TXFRONT_INT1CFG_RATIO,ratio)
self._reg_write(model.vars.TXFRONT_INT1CFG_GAINSHIFT, gainshift)
def calc_int2cfg_reg(self, model):
#This method calculates the int2cfg register fields
#Read in model vars
softmodem_modulator_select = model.vars.softmodem_modulator_select.value
softmodem_tx_interpolation2 = model.vars.softmodem_tx_interpolation2.value
# Set only when OFDM modulator is used
if softmodem_modulator_select == model.vars.softmodem_modulator_select.var_enum.IQ_MOD:
ratio = softmodem_tx_interpolation2-1
gainshift = ceil(log2(softmodem_tx_interpolation2**2))
else:
ratio = 0
gainshift = 0
#Write the registers
self._reg_write(model.vars.TXFRONT_INT2CFG_RATIO, ratio)
self._reg_write(model.vars.TXFRONT_INT2CFG_GAINSHIFT, gainshift)
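    # Worked example (illustrative) using the interpolation factors computed above:
    # the OFDM options give INT2 = 2, 4, 8 or 16, so GAINSHIFT = ceil(log2(INT2**2))
    # evaluates to 2, 4, 6 and 8; the CW path (INT2 = 5) gives ceil(log2(25)) = 5.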
def calc_softmodem_txircal(self, model):
#Read in model vars
divn = model.vars.fpll_divn.value
divx = model.vars.fpll_divx.value
#divn: [kt, int2_ratio, int2_gainshift]
txircal_dict = {
80:[5,5,5],
81:[9,9,7],
85:[5,17,9],
86:[5,43,11],
93:[3,31,10],
97:[2,97,14],
98:[7,7,6],
99:[9,11,7],
100:[5,5,5],
101:[2,101,14],
103:[2,103,14],
111:[3,37,11],
114:[3,19,9],
115:[5,23,10],
116:[5,29,10],
117:[9,13,8],
119:[3,59,12]
}
try:
softmodem_txircal_params = txircal_dict[divn]
except KeyError:
LogMgr.Error('Invalid RFFPLL divn for softmodem tx calibration')
softmodem_txircal_params = []
softmodem_txircal_freq = 0
else:
fft_size_log2 = 6
txircal_dec0 = 4
softmodem_txircal_freq = ((1 << (17 - fft_size_log2)) * softmodem_txircal_params[0] * divx * softmodem_txircal_params[1]) / txircal_dec0 / divn
model.vars.softmodem_txircal_params.value = softmodem_txircal_params
model.vars.softmodem_txircal_freq.value = int(softmodem_txircal_freq)
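        # Worked example (illustrative): for divn = 100 the table gives
        # [kt, int2_ratio, int2_gainshift] = [5, 5, 5], so with fft_size_log2 = 6 and
        # txircal_dec0 = 4 the tone frequency reduces to
        # (1 << 11) * 5 * divx * 5 / 4 / 100 = 128 * divx.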
def calc_srccfg_ratio_reg(self, model):
        #This method calculates the softmodem SRCCFG RATIO value
#Read in model vars
softmodem_modulator_select = model.vars.softmodem_modulator_select.value
softmodem_modulation_type = model.vars.softmodem_modulation_type.value
dac_freq_actual = model.vars.dac_freq_actual.value
baudrate = model.vars.baudrate.value
softmodem_tx_interpolation1 = model.vars.softmodem_tx_interpolation1.value
softmodem_tx_interpolation2 = model.vars.softmodem_tx_interpolation2.value
# Set only when OFDM modulator is used
if softmodem_modulator_select == model.vars.softmodem_modulator_select.var_enum.IQ_MOD:
if softmodem_modulation_type == model.vars.softmodem_modulation_type.var_enum.SUN_OFDM:
ratio = (2**18) * (2.0*baudrate*softmodem_tx_interpolation1*softmodem_tx_interpolation2)/dac_freq_actual #2^18 * (2*OFDM_RATE*INT1*INT2)/DAC_FREQ
else:
ratio = (2**18) #Ratio of 1 for CW
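                # TXFRONT_SRCCFG_RATIO is a fixed-point value scaled by 2**18, so
                # writing 2**18 corresponds to a resampling ratio of exactly 1.0
                # (calc_tx_baud_rate_actual below divides the field by 2**18).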
else:
ratio = 0
#Write the reg
self._reg_write(model.vars.TXFRONT_SRCCFG_RATIO, int(ratio))
def calc_tx_baud_rate_actual(self, model):
#Read in model vars
softmodem_modulator_select = model.vars.softmodem_modulator_select.value
softmodem_modulation_type = model.vars.softmodem_modulation_type.value
softmodem_tx_interpolation1_actual = model.vars.TXFRONT_INT1CFG_RATIO.value + 1
softmodem_tx_interpolation2_actual = model.vars.TXFRONT_INT2CFG_RATIO.value + 1
txfront_srccfg_ratio_actual = model.vars.TXFRONT_SRCCFG_RATIO.value/(2**18)
dac_freq_actual = model.vars.dac_freq_actual.value
if softmodem_modulator_select == model.vars.softmodem_modulator_select.var_enum.IQ_MOD and\
softmodem_modulation_type == model.vars.softmodem_modulation_type.var_enum.SUN_OFDM:
tx_baud_rate_actual = dac_freq_actual/softmodem_tx_interpolation1_actual/softmodem_tx_interpolation2_actual*txfront_srccfg_ratio_actual/2.0
model.vars.tx_baud_rate_actual.value = tx_baud_rate_actual
else:
super().calc_tx_baud_rate_actual(model)
def calc_txmix_regs(self, model):
#This method calculates the RAC_TXMIX fields as well as the RAC_PATRIM6_TXTRIMFILGAIN field
softmodem_modulator_select = model.vars.softmodem_modulator_select.value
base_frequency_hz = model.vars.base_frequency_hz.value
        #Choose register settings based on RF band
if softmodem_modulator_select == model.vars.softmodem_modulator_select.var_enum.IQ_MOD:
if base_frequency_hz < 450e6:
#430M Band
txtrimfilgain = 2
txselmixctune = 43
txselmixgmslicei = 9
txselmixgmsliceq = 9
txselmixrload = 0
txselmixband = 0
txmixcappulldown = 1
elif base_frequency_hz < 520e6:
# 470M Band
txtrimfilgain = 2
txselmixctune = 31
txselmixgmslicei = 8
txselmixgmsliceq = 8
txselmixrload = 0
txselmixband = 0
txmixcappulldown = 1
elif base_frequency_hz < 625e6:
# 570M Band
txtrimfilgain = 2
txselmixctune = 13
txselmixgmslicei = 6
txselmixgmsliceq = 6
txselmixrload = 2
txselmixband = 0
txmixcappulldown = 1
elif base_frequency_hz < 730e6:
# 680M Band
txtrimfilgain = 2
txselmixctune = 3
txselmixgmslicei = 6
txselmixgmsliceq = 6
txselmixrload = 5
txselmixband = 0
txmixcappulldown = 1
elif base_frequency_hz < 825e6:
# 780M Band
txtrimfilgain = 2
txselmixctune = 24
txselmixgmslicei = 7
txselmixgmsliceq = 7
txselmixrload = 1
txselmixband = 1
txmixcappulldown = 0
elif base_frequency_hz < 895e6:
# 870M Band
txtrimfilgain = 2
txselmixctune = 14
txselmixgmslicei = 6
txselmixgmsliceq = 6
txselmixrload = 2
txselmixband = 1
txmixcappulldown = 0
elif base_frequency_hz < 940e6:
# 920M Band (settings from Eric Vapillon)
txtrimfilgain = 2
txselmixctune = 9
txselmixgmslicei = 6
txselmixgmsliceq = 6
txselmixrload = 4
txselmixband = 1
txmixcappulldown = 0
else:
# 960M Band
txtrimfilgain = 2
txselmixctune = 5
txselmixgmslicei = 6
txselmixgmsliceq = 6
txselmixrload = 5
txselmixband = 1
txmixcappulldown = 0
else:
#Use POR values
txtrimfilgain = 1
txselmixctune = 0
txselmixgmslicei = 4
txselmixgmsliceq = 4
txselmixrload = 0
txselmixband = 0
txmixcappulldown = 0
#Write the register fields
self._reg_write(model.vars.RAC_PATRIM6_TXTRIMFILGAIN, txtrimfilgain)
self._reg_write(model.vars.RAC_TXMIX_TXSELMIXCTUNE, txselmixctune)
self._reg_write(model.vars.RAC_TXMIX_TXSELMIXGMSLICEI, txselmixgmslicei)
self._reg_write(model.vars.RAC_TXMIX_TXSELMIXGMSLICEQ, txselmixgmsliceq)
self._reg_write(model.vars.RAC_TXMIX_TXSELMIXRLOAD, txselmixrload)
self._reg_write(model.vars.RAC_TXMIX_TXSELMIXBAND, txselmixband)
self._reg_write(model.vars.RAC_TXMIX_TXMIXCAPPULLDOWN, txmixcappulldown)
def calc_symbol_rates_actual(self, model):
modulation_type = model.vars.modulation_type.value
max_bit_rate = model.vars.bitrate.value #We already store the max bitrate here
if (modulation_type == model.vars.modulation_type.var_enum.OFDM):
# Symbol rate is constant for OFDM: 1/120us
ofdm_tsym = 120e-6
ofdm_symbol_rate = 1/ofdm_tsym
# baud per symbol is not used in OFDM
baud_per_symbol = 1
            # bits_per_symbol corresponds to the maximum bit rate (MCS6) for a given option divided by the symbol rate:
bits_per_symbol = int(max_bit_rate / ofdm_symbol_rate)
# Update model variables
model.vars.ofdm_symbol_rate.value = ofdm_symbol_rate
model.vars.baud_per_symbol_actual.value = baud_per_symbol
model.vars.bits_per_symbol_actual.value = bits_per_symbol
else:
# Call Ocelot version
super().calc_symbol_rates_actual(model)
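        # Worked example (hypothetical bit rate): with the fixed 120 us OFDM symbol
        # (symbol rate ~8333.3 sym/s) and a maximum bit rate of 2.4 Mb/s,
        # bits_per_symbol = int(2.4e6 * 120e-6) = 288.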
def calc_txbases_reg(self, model):
# Read in model variables
preamble_length = model.vars.preamble_length.value # This is the TX preamble length
preamble_pattern_len_actual = model.vars.preamble_pattern_len_actual.value
softmodem_active = (model.vars.softmodem_modulation_type.value != model.vars.softmodem_modulation_type.var_enum.NONE)
if softmodem_active:
#If the softmodem is active, the preamble bits will come from the softmodem so set TXBASES=0
txbases = 0
else:
txbases = preamble_length / preamble_pattern_len_actual
if (txbases) > 0xffff:
LogMgr.Error("Calculated TX preamble sequences (TXBASE) value of %s exceeds limit of 65535! Adjust preamble inputs." % txbases)
# Write the register
self._reg_write(model.vars.MODEM_PRE_TXBASES, int(txbases))
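        # Worked example (illustrative, hypothetical numbers): a 64-bit TX preamble
        # built from a 2-bit repeating pattern gives TXBASES = 64 / 2 = 32, well
        # under the 16-bit limit of 0xffff checked above.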
| 43.273743
| 161
| 0.64472
|
d48280fe293e628fb3992f59c0a57072ecd0ab71
| 521
|
py
|
Python
|
learnHTTP/error.py
|
ClariNerd617/personalProjects
|
926b02c9998f9d17233f91d38a7250240c1b80dc
|
[
"CC0-1.0"
] | null | null | null |
learnHTTP/error.py
|
ClariNerd617/personalProjects
|
926b02c9998f9d17233f91d38a7250240c1b80dc
|
[
"CC0-1.0"
] | null | null | null |
learnHTTP/error.py
|
ClariNerd617/personalProjects
|
926b02c9998f9d17233f91d38a7250240c1b80dc
|
[
"CC0-1.0"
] | null | null | null |
import requests
from requests.exceptions import HTTPError
for url in ['https://api.github.com', 'https://api.github.com/invalid']:
print(url) # for testing
try:
response = requests.get(url)
# if response was successful, no exception will be raised
response.raise_for_status()
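        # raise_for_status() raises requests.exceptions.HTTPError for 4xx/5xx
        # responses; the first handler below reports it, and the else branch
        # only runs when no exception was raised.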
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('Success!')
| 32.5625
| 72
| 0.637236
|
1abae18b1318f68270dd725a2fb1b40f309b33ab
| 17,887
|
py
|
Python
|
src/simulation/dynamics/sphericalPendulum/_UnitTest/test_sphericalPendulum.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | null | null | null |
src/simulation/dynamics/sphericalPendulum/_UnitTest/test_sphericalPendulum.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | 1
|
2019-03-13T20:52:22.000Z
|
2019-03-13T20:52:22.000Z
|
src/simulation/dynamics/sphericalPendulum/_UnitTest/test_sphericalPendulum.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | null | null | null |
''' '''
'''
ISC License
Copyright (c) 2016, Autonomous Vehicle Systems Lab, University of Colorado at Boulder
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import sys, os, inspect
import pytest
import matplotlib.pyplot as plt
import numpy as np
filename = inspect.getframeinfo(inspect.currentframe()).filename
path = os.path.dirname(os.path.abspath(filename))
from Basilisk.utilities import SimulationBaseClass
from Basilisk.utilities import unitTestSupport # general support file with common unit test functions
from Basilisk.simulation import spacecraftPlus
from Basilisk.simulation import sphericalPendulum
from Basilisk.simulation import gravityEffector
from Basilisk.utilities import simIncludeGravBody
from Basilisk.utilities import orbitalMotion
from Basilisk.utilities import macros
from Basilisk.simulation import fuelTank
from Basilisk.simulation import thrusterDynamicEffector
from Basilisk.utilities import simIncludeThruster
@pytest.mark.parametrize("useFlag, testCase", [
(False, 1),
(False, 2),
(False,3)
])
# provide a unique test method name, starting with test_
def test_scenarioSphericalPendulum(show_plots, useFlag, testCase):
'''This function is called by the py.test environment.'''
# each test method requires a single assert method to be called
[testResults, testMessage] = sphericalPendulumTest(show_plots, useFlag, testCase)
assert testResults < 1, testMessage
def sphericalPendulumTest(show_plots, useFlag,testCase):
'''Call this routine directly to run the test scenario.'''
testFailCount = 0 # zero unit test result counter
testMessages = [] # create empty array to store test log messages
if testCase == 1 or testCase == 3:
timeStep = 0.01
if testCase == 2:
timeStep = 0.001
simTaskName = "simTask"
simProcessName = "simProcess"
# create simulation
scSim=SimulationBaseClass.SimBaseClass()
    # close any other simulation that may be open
scSim.TotalSim.terminateSimulation()
    # create a dynamical process
dynProcess = scSim.CreateNewProcess(simProcessName)
simulationTimeStep = macros.sec2nano(timeStep)
# add task to the dynamical process
dynProcess.addTask(scSim.CreateNewTask(simTaskName, simulationTimeStep))
# create spacecraft object
scObject = spacecraftPlus.SpacecraftPlus()
scObject.ModelTag = "spacecraftBody"
scSim.AddModelToTask(simTaskName, scObject)
# Pendulum 1
scSim.pendulum1 = sphericalPendulum.SphericalPendulum()
# Define Variables for pendulum 1
    scSim.pendulum1.pendulumRadius = 0.3  # m
scSim.pendulum1.d = [[0.1], [0.1], [0.1]] # m
scSim.pendulum1.D = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] # N*s/m
scSim.pendulum1.nameOfPhiState = "sphericalPendulumPhi1"
scSim.pendulum1.nameOfPhiDotState = "sphericalPendulumPhiDot1"
scSim.pendulum1.nameOfThetaState= "sphericalPendulumTheta1"
scSim.pendulum1.nameOfThetaDotState= "sphericalPendulumThetaDot1"
scSim.pendulum1.nameOfMassState = "sphericalPendulumMass1"
scSim.pendulum1.phiDotInit = 0.01 # rad/s
scSim.pendulum1.thetaDotInit = 0.05 # rad/s
scSim.pendulum1.massInit = 20.0 # kg
scSim.pendulum1.pHat_01=[[np.sqrt(2)/2], [0] , [np.sqrt(2)/2]] # first unit vector of the Pendulum frame
scSim.pendulum1.pHat_02=[[0],[1],[0]] # second unit vector of the Pendulum frame
scSim.pendulum1.pHat_03=[[-np.sqrt(2)/2],[0],[np.sqrt(2)/2]] # third unit vector of the Pendulum frame
# Pendulum 2
scSim.pendulum2 = sphericalPendulum.SphericalPendulum()
# Define Variables for pendulum 2
    scSim.pendulum2.pendulumRadius = 0.4  # m
scSim.pendulum2.d = [[0.1], [0.1], [0.1]] # m
scSim.pendulum2.D = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] # N*s/m
scSim.pendulum2.nameOfPhiState = "sphericalPendulumPhi2"
scSim.pendulum2.nameOfPhiDotState = "sphericalPendulumPhiDot2"
scSim.pendulum2.nameOfThetaState= "sphericalPendulumTheta2"
scSim.pendulum2.nameOfThetaDotState= "sphericalPendulumThetaDot2"
scSim.pendulum2.nameOfMassState = "sphericalPendulumMass2"
scSim.pendulum2.phiDotInit = 0.1 # rad/s
scSim.pendulum2.thetaDotInit = 0.5 # rad/s
scSim.pendulum2.massInit =40.0 # kg
# Pendulum frame same as Body frame
if testCase == 3:
thrusterCommandName = "acs_thruster_cmds"
# add thruster devices
thFactory = simIncludeThruster.thrusterFactory()
thFactory.create('MOOG_Monarc_445',
[1,0,0], # location in S frame
[0,1,0] # direction in S frame
)
# create thruster object container and tie to spacecraft object
thrustersDynamicEffector = thrusterDynamicEffector.ThrusterDynamicEffector()
thFactory.addToSpacecraft("Thrusters",
thrustersDynamicEffector,
scObject)
scSim.fuelTankStateEffector = fuelTank.FuelTank()
scSim.fuelTankStateEffector.setTankModel(fuelTank.TANK_MODEL_CONSTANT_VOLUME)
tankModel = fuelTank.cvar.FuelTankModelConstantVolume
tankModel.propMassInit = 40.0
tankModel.r_TcT_TInit = [[0.0],[0.0],[0.0]]
scSim.fuelTankStateEffector.r_TB_B = [[0.0],[0.0],[0.0]]
tankModel.radiusTankInit = 46.0 / 2.0 / 3.2808399 / 12.0
# Add tank and thruster
scObject.addStateEffector(scSim.fuelTankStateEffector)
scSim.fuelTankStateEffector.addThrusterSet(thrustersDynamicEffector)
# set thruster commands
ThrustMessage = thrusterDynamicEffector.THRArrayOnTimeCmdIntMsg()
msgSize = ThrustMessage.getStructSize()
ThrustMessage.OnTimeRequest = [5.0]
scSim.TotalSim.CreateNewMessage(simProcessName, thrusterCommandName, msgSize, 2)
scSim.TotalSim.WriteMessageData(thrusterCommandName, msgSize, 0, ThrustMessage)
# Add test module to runtime call list
scSim.AddModelToTask(simTaskName, scSim.fuelTankStateEffector)
scSim.AddModelToTask(simTaskName, thrustersDynamicEffector)
scSim.TotalSim.logThisMessage(scSim.fuelTankStateEffector.FuelTankOutMsgName, simulationTimeStep)
# Add particles to tank to activate mass depletion
scSim.fuelTankStateEffector.pushFuelSloshParticle(scSim.pendulum1)
scSim.fuelTankStateEffector.pushFuelSloshParticle(scSim.pendulum2)
# define hub properties
scObject.hub.mHub = 1500 # kg
scObject.hub.r_BcB_B = [[1.0], [0.5], [0.1]] # m
scObject.hub.IHubPntBc_B = [[900.0, 0.0, 0.0], [0.0, 800.0, 0.0], [0.0, 0.0, 600.0]] # kg*m^2
scObject.hub.sigma_BNInit = [[0.0], [0.0], [0.0]] # rad
scObject.hub.omega_BN_BInit = [[1.0], [0.5], [0.1]] # rad/s
# Add fuel slosh to spacecraft
scObject.addStateEffector(scSim.pendulum1)
scObject.addStateEffector(scSim.pendulum2)
# call for a fresh copy of the gravitational body factory
gravFactory = simIncludeGravBody.gravBodyFactory()
planet = gravFactory.createEarth()
planet.isCentralBody = True # ensure this is the central gravitational body
planetRadius = planet.radEquator
mu = planet.mu
# attach gravity to the spacecraft
scObject.gravField.gravBodies = spacecraftPlus.GravBodyVector(gravFactory.gravBodies.values())
# initialize orbital elements
oe = orbitalMotion.ClassicElements()
oe.a=6700.0*1000
oe.e=0.01
oe.omega=100.0*macros.D2R
oe.Omega=100.0*macros.D2R
oe.i=30.0*macros.D2R
oe.f=0.0
# convert them in position and velocity
rN, vN = orbitalMotion.elem2rv(mu, oe)
# attach the state to the spacecraft
scObject.hub.r_CN_NInit = unitTestSupport.np2EigenVectorXd(rN) # m - r_BN_N
scObject.hub.v_CN_NInit = unitTestSupport.np2EigenVectorXd(vN) # m/s - v_BN_N
simulationTime = macros.sec2nano(10)
#
# Setup data logging before the simulation is initialized
#
numDataPoints = 100
samplingTime = simulationTime / (numDataPoints-1)
scSim.TotalSim.logThisMessage(scObject.scStateOutMsgName, samplingTime)
# initialize Simulation: This function clears the simulation log, and runs the self_init()
# cross_init() and reset() routines on each module.
# If the routine InitializeSimulationAndDiscover() is run instead of InitializeSimulation(),
    # then all messages that are shared across different BSK threads are auto-discovered.
#
scSim.InitializeSimulation()
scSim.AddVariableForLogging(scObject.ModelTag + ".totOrbEnergy", simulationTimeStep, 0, 0, 'double')
scSim.AddVariableForLogging(scObject.ModelTag + ".totOrbAngMomPntN_N", simulationTimeStep, 0, 2, 'double')
scSim.AddVariableForLogging(scObject.ModelTag + ".totRotAngMomPntC_N", simulationTimeStep, 0, 2, 'double')
scSim.AddVariableForLogging(scObject.ModelTag + ".totRotEnergy", simulationTimeStep, 0, 0, 'double')
scSim.AddVariableForLogging("spacecraftBody.dynManager.getStateObject('sphericalPendulumPhi1').getState()", simulationTimeStep, 0, 0, 'double')
scSim.AddVariableForLogging("spacecraftBody.dynManager.getStateObject('sphericalPendulumTheta1').getState()", simulationTimeStep, 0, 0, 'double')
scSim.AddVariableForLogging("spacecraftBody.dynManager.getStateObject('sphericalPendulumThetaDot1').getState()", simulationTimeStep, 0, 0, 'double')
scSim.AddVariableForLogging("spacecraftBody.dynManager.getStateObject('sphericalPendulumPhiDot1').getState()", simulationTimeStep, 0, 0, 'double')
if testCase == 3:
scSim.AddVariableForLogging(
"spacecraftBody.dynManager.getStateObject('sphericalPendulumMass1').getState()", simulationTimeStep, 0, 0, 'double')
scSim.AddVariableForLogging(
"spacecraftBody.dynManager.getStateObject('sphericalPendulumMass2').getState()", simulationTimeStep, 0, 0, 'double')
#
    # configure a simulation stop time and execute the simulation run
#
scSim.ConfigureStopTime(simulationTime)
scSim.ExecuteSimulation()
if testCase == 3:
fuelMass = scSim.pullMessageLogData(scSim.fuelTankStateEffector.FuelTankOutMsgName + '.fuelMass',
range(1))
fuelMassDot = scSim.pullMessageLogData(scSim.fuelTankStateEffector.FuelTankOutMsgName + '.fuelMassDot',
range(1))
mass1Out = scSim.GetLogVariableData(
"spacecraftBody.dynManager.getStateObject('sphericalPendulumMass1').getState()")
mass2Out = scSim.GetLogVariableData(
"spacecraftBody.dynManager.getStateObject('sphericalPendulumMass2').getState()")
# request energy and momentum
orbEnergy = scSim.GetLogVariableData(scObject.ModelTag + ".totOrbEnergy")
orbAngMom_N = scSim.GetLogVariableData(scObject.ModelTag + ".totOrbAngMomPntN_N")
rotAngMom_N = scSim.GetLogVariableData(scObject.ModelTag + ".totRotAngMomPntC_N")
rotEnergy = scSim.GetLogVariableData(scObject.ModelTag + ".totRotEnergy")
if timeStep == 0.01:
testCaseName = "OneHundredth"
if timeStep == 0.001:
testCaseName = "OneThousandth"
plt.close("all") # clears out plots from earlier test runs
if testCase != 3:
plt.figure(1,figsize=(5,4))
plt.plot(orbAngMom_N[:,0]*1e-9, (orbAngMom_N[:,1] - orbAngMom_N[0,1])/orbAngMom_N[0,1], orbAngMom_N[:,0]*1e-9, (orbAngMom_N[:,2] - orbAngMom_N[0,2])/orbAngMom_N[0,2], orbAngMom_N[:,0]*1e-9, (orbAngMom_N[:,3] - orbAngMom_N[0,3])/orbAngMom_N[0,3])
plt.xlabel('Time (s)')
plt.ylabel('Relative Orbital Angular Momentum Variation')
unitTestSupport.writeFigureLaTeX("ChangeInOrbitalAngularMomentum" + testCaseName, "Change in Orbital Angular Momentum " + testCaseName, plt, "width=0.8\\textwidth", path)
plt.figure(2,figsize=(5,4))
plt.plot(orbEnergy[:,0]*1e-9, (orbEnergy[:,1] - orbEnergy[0,1])/orbEnergy[0,1])
plt.xlabel('Time (s)')
plt.ylabel('Relative Orbital Energy Variation')
unitTestSupport.writeFigureLaTeX("ChangeInOrbitalEnergy" + testCaseName, "Change in Orbital Energy " + testCaseName, plt, "width=0.8\\textwidth", path)
plt.figure(3,figsize=(5,4))
plt.plot(rotAngMom_N[:,0]*1e-9, (rotAngMom_N[:,1] - rotAngMom_N[0,1])/rotAngMom_N[0,1], rotAngMom_N[:,0]*1e-9, (rotAngMom_N[:,2] - rotAngMom_N[0,2])/rotAngMom_N[0,2], rotAngMom_N[:,0]*1e-9, (rotAngMom_N[:,3] - rotAngMom_N[0,3])/rotAngMom_N[0,3])
plt.xlabel('Time (s)')
plt.ylabel('Relative Rotational Angular Momentum Variation')
unitTestSupport.writeFigureLaTeX("ChangeInRotationalAngularMomentum" + testCaseName, "Change in Rotational Angular Momentum " + testCaseName, plt, "width=0.8\\textwidth", path)
plt.figure(4,figsize=(5,4))
plt.plot(rotEnergy[:,0]*1e-9, (rotEnergy[:,1] - rotEnergy[0,1])/rotEnergy[0,1])
plt.xlabel('Time (s)')
plt.ylabel('Relative Rotational Energy Variation')
unitTestSupport.writeFigureLaTeX("ChangeInRotationalEnergy" + testCaseName, "Change in Rotational Energy " + testCaseName, plt, "width=0.8\\textwidth", path)
if testCase == 3:
plt.figure()
plt.plot(fuelMass[:,0]*1e-9, fuelMass[:,1])
plt.title("Tank Fuel Mass")
plt.figure()
plt.plot(fuelMassDot[:,0]*1e-9, fuelMassDot[:,1])
plt.title("Tank Fuel Mass Dot")
plt.figure()
plt.plot(mass1Out[:,0]*1e-9, mass1Out[:,1])
plt.title("Fuel Particle 1 Mass")
plt.figure()
plt.plot(mass2Out[:,0]*1e-9, mass2Out[:,1])
plt.title("Fuel Particle 2 Mass")
mDotFuel = -0.19392039093
mDotParicle1True = mDotFuel*(20./100.)
mDotParicle2True = mDotFuel*(40./100.)
mDotParicle1Data = [0,(mass1Out[2,1] - mass1Out[1,1])/((mass1Out[2,0] - mass1Out[1,0])*1e-9)]
mDotParicle2Data = [0,(mass2Out[2,1] - mass2Out[1,1])/((mass2Out[2,0] - mass2Out[1,0])*1e-9)]
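        # Note: the [:, 0] columns hold simulation time in nanoseconds, hence the
        # 1e-9 factor; the mass rates above are finite differences of consecutive
        # logged samples, compared against the expected depletion split (20%/40%).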
if show_plots:
plt.show()
plt.close('all')
if testCase != 3:
accuracy = 1e-8
for k in range(len((rotAngMom_N[:,1]))):
if abs((rotAngMom_N[k,1] - rotAngMom_N[0,1])/rotAngMom_N[0,1])>accuracy:
testFailCount += 1
testMessages.append("FAILED: SphericalPendulum does not conserve Rotational Angular Momentum around x axes (timeStep={}s)".format(timeStep))
if abs((rotAngMom_N[k,2] - rotAngMom_N[0,2])/rotAngMom_N[0,2])>accuracy:
testFailCount += 1
testMessages.append("FAILED: SphericalPendulum does not conserve Rotational Angular Momentum around y axes (timeStep={}s)".format(timeStep))
if abs((rotAngMom_N[k,3] - rotAngMom_N[0,3])/rotAngMom_N[0,3])>accuracy:
testFailCount += 1
testMessages.append("FAILED: SphericalPendulum does not conserve Rotational Angular Momentum around z axes (timeStep={}s)".format(timeStep))
if abs((orbAngMom_N[k,1] - orbAngMom_N[0,1])/orbAngMom_N[0,1])>accuracy:
testFailCount += 1
testMessages.append("FAILED: SphericalPendulum does not conserve Orbital Angular Momentum around x axes (timeStep={}s)".format(timeStep))
if abs((orbAngMom_N[k,2] - orbAngMom_N[0,2])/orbAngMom_N[0,2])>accuracy:
testFailCount += 1
testMessages.append("FAILED: SphericalPendulum does not conserve Orbital Angular Momentum around y axes (timeStep={}s)".format(timeStep))
if abs((orbAngMom_N[k,3] - orbAngMom_N[0,3])/orbAngMom_N[0,3])>accuracy:
testFailCount += 1
testMessages.append("FAILED: SphericalPendulum does not conserve Orbital Angular Momentum around z axes (timeStep={}s)".format(timeStep))
if abs((rotEnergy[k,1] - rotEnergy[0,1])/rotEnergy[0,1])>accuracy:
testFailCount += 1
testMessages.append("FAILED: SphericalPendulum does not conserve Rotational Energy (timeStep={}s)".format(timeStep))
if abs((orbEnergy[k,1] - orbEnergy[0,1])/orbEnergy[0,1])>accuracy:
testFailCount += 1
testMessages.append("FAILED: SphericalPendulum does not conserve Orbital Energy (timeStep={}s)".format(timeStep))
if testCase == 3:
accuracy = 1e-4
if not unitTestSupport.isDoubleEqual(mDotParicle1Data,mDotParicle1True,accuracy):
testFailCount += 1
testMessages.append("FAILED: Linear Spring Mass Damper unit test failed mass 1 dot test")
if not unitTestSupport.isDoubleEqual(mDotParicle2Data,mDotParicle2True,accuracy):
testFailCount += 1
testMessages.append("FAILED: Linear Spring Mass Damper unit test failed mass 2 dot test")
    if testFailCount == 0:
        print("PASSED ")
    else:
        print(testFailCount)
        print(testMessages)
return [testFailCount, ''.join(testMessages)]
if __name__ == "__main__":
sphericalPendulumTest(True, # showplots
False, # useFlag
2, # testCase
)
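# --- Illustrative helper (not used by the test above) ---
# The conservation checks above all compare the relative variation of a logged
# quantity against its initial value; a minimal standalone version of that check:
def relative_variation(samples):
    """Return |(x_k - x_0)/x_0| for every sample x_k of a 1-D sequence."""
    x0 = samples[0]
    return [abs((x - x0) / x0) for x in samples]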
| 50.960114
| 253
| 0.685917
|
64d3d5b6d684df94b8d9a991950eef0f2df25e2a
| 77,976
|
py
|
Python
|
pandas/tests/arithmetic/test_timedelta64.py
|
cgangwar11/pandas
|
972f491cb7fdcc3c1c2cb9f05644128f13457f87
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-09-05T07:09:39.000Z
|
2020-09-05T07:09:39.000Z
|
pandas/tests/arithmetic/test_timedelta64.py
|
cgangwar11/pandas
|
972f491cb7fdcc3c1c2cb9f05644128f13457f87
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/arithmetic/test_timedelta64.py
|
cgangwar11/pandas
|
972f491cb7fdcc3c1c2cb9f05644128f13457f87
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-01-13T02:05:10.000Z
|
2020-01-13T02:05:10.000Z
|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import OutOfBoundsDatetime, PerformanceWarning
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
NaT,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
def assert_dtype(obj, expected_dtype):
"""
Helper to check the dtype for a Series, Index, or single-column DataFrame.
"""
if isinstance(obj, DataFrame):
dtype = obj.dtypes.iat[0]
else:
dtype = obj.dtype
assert dtype == expected_dtype
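# Example usage of the helper above (illustrative):
#   assert_dtype(Series([Timedelta("1 days")]), "timedelta64[ns]")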
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range("2H", periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
msg = "Invalid comparison between dtype"
with pytest.raises(TypeError, match=msg):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
@pytest.mark.parametrize(
"td_scalar",
[timedelta(days=1), Timedelta(days=1), Timedelta(days=1).to_timedelta64()],
)
def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):
# regression test for GH#5963
box = box_with_array
xbox = box if box is not pd.Index else np.ndarray
ser = pd.Series([timedelta(days=1), timedelta(days=2)])
ser = tm.box_expected(ser, box)
actual = ser > td_scalar
expected = pd.Series([False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(actual, expected)
@pytest.mark.parametrize("invalid", [345600000000000, "a"])
def test_td64_comparisons_invalid(self, box_with_array, invalid):
# GH#13624 for str
box = box_with_array
rng = timedelta_range("1 days", periods=10)
obj = tm.box_expected(rng, box)
assert_invalid_comparison(obj, invalid, box)
@pytest.mark.parametrize(
"other",
[
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.date_range("1970-01-01", periods=10, tz="UTC").array,
np.array(pd.date_range("1970-01-01", periods=10)),
list(pd.date_range("1970-01-01", periods=10)),
pd.date_range("1970-01-01", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_td64arr_cmp_arraylike_invalid(self, other):
# We don't parametrize this over box_with_array because listlike
# other plays poorly with assert_invalid_comparison reversed checks
rng = timedelta_range("1 days", periods=10)._data
assert_invalid_comparison(rng, other, tm.to_array)
def test_td64arr_cmp_mixed_invalid(self):
rng = timedelta_range("1 days", periods=5)._data
other = np.array([0, 1, 2, rng[3], pd.Timestamp.now()])
result = rng == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = rng != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
rng < other
with pytest.raises(TypeError, match=msg):
rng > other
with pytest.raises(TypeError, match=msg):
rng <= other
with pytest.raises(TypeError, match=msg):
rng >= other
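# Standalone illustration (not a test) of the comparison semantics exercised above,
# assuming an interactive session:
#   >>> import pandas as pd
#   >>> pd.Series(pd.timedelta_range("1 days", periods=3)) > pd.Timedelta("2 days")
#   0    False
#   1    False
#   2     True
#   dtype: bool
# Ordered comparisons (<, <=, >, >=) against an integer or a string raise TypeError,
# while == simply returns all-False.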
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
@pytest.mark.parametrize("dtype", [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex(
[pd.Timedelta("1 days"), pd.NaT, pd.Timedelta("3 days")]
)
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta("3 days")])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(
[
"1 day",
pd.NaT,
"1 day 00:00:01",
pd.NaT,
"1 day 00:00:01",
"5 day 00:00:03",
]
)
tdidx2 = pd.TimedeltaIndex(
["2 day", "2 day", pd.NaT, pd.NaT, "1 day 00:00:02", "5 days 00:00:03"]
)
tdarr = np.array(
[
np.timedelta64(2, "D"),
np.timedelta64(2, "D"),
np.timedelta64("nat"),
np.timedelta64("nat"),
np.timedelta64(1, "D") + np.timedelta64(2, "s"),
np.timedelta64(5, "D") + np.timedelta64(3, "s"),
]
)
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range("1 days", periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(["2H", "4H", "6H", "8H", "10H"], freq="2H", name="x")
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["4H", "8H", "12H", "16H", "20H"], freq="4H", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "4H"
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["1H", "2H", "3H", "4H", "5H"], freq="H", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "H"
idx = TimedeltaIndex(["2H", "4H", "6H", "8H", "10H"], freq="2H", name="x")
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(
["-2H", "-4H", "-6H", "-8H", "-10H"], freq="-2H", name="x"
)
tm.assert_index_equal(result, exp)
assert result.freq == "-2H"
idx = TimedeltaIndex(["-2H", "-1H", "0H", "1H", "2H"], freq="H", name="x")
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["2H", "1H", "0H", "1H", "2H"], freq=None, name="x")
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
td = Timedelta("1 days")
dt = Timestamp("20130101")
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = r"unsupported operand type\(s\) for -"
with pytest.raises(TypeError, match=msg):
td - dt
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"], name="bar")
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(["0 days", "1 days", "2 days"], name="bar")
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(["0 days", pd.NaT, "1 days"], name="foo")
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(["0 days", pd.NaT, "-1 days"], name="foo")
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
["20121231", "20130101", "20130102"], freq="D", name="bar"
)
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(["20121231", pd.NaT, "20121230"], name="foo")
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range("20130101", periods=3)
ts = Timestamp("20130101")
dt = ts.to_pydatetime()
dti_tz = pd.date_range("20130101", periods=3).tz_localize("US/Eastern")
ts_tz = Timestamp("20130101").tz_localize("US/Eastern")
ts_tz2 = Timestamp("20130101").tz_localize("CET")
dt_tz = ts_tz.to_pydatetime()
td = Timedelta("1 days")
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta("0 days")
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta("0 days")
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta("0 days")
_check(result, expected)
# tz mismatches
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta("0 days")
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(["20121231", "20130101", "20130102"], tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(["0 days", pd.NaT, "0 days"], name="foo")
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(["2 days", pd.NaT, "4 days"], name="foo")
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(["20121231", pd.NaT, "20130101"])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
td = Timedelta("1 days")
dt = Timestamp("20130101")
result = tdi + dt
expected = DatetimeIndex(["20130102", pd.NaT, "20130103"], name="foo")
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(["20130102", pd.NaT, "20130103"], name="foo")
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(["2 days", pd.NaT, "3 days"], name="foo")
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(["2 days", pd.NaT, "3 days"], name="foo")
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
msg = "Addition/subtraction of integers and integer-arrays"
with pytest.raises(TypeError, match=msg):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(["20130102", pd.NaT, "20130105"])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(["20130102", pd.NaT, "20130105"])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp("20130102")
assert result == expected
result = td + dt
expected = Timestamp("20130102")
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize("freq", ["D", "B"])
def test_timedelta(self, freq):
index = pd.date_range("1/1/2000", periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
back = back._with_freq("infer")
tm.assert_index_equal(index, back)
if freq == "D":
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range("2013", "2014")
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
assert result1.freq == rng.freq
result1 = result1._with_freq(None)
tm.assert_index_equal(result1, result4)
assert result3.freq == rng.freq
result3 = result3._with_freq(None)
tm.assert_index_equal(result2, result3)
def test_tda_add_sub_index(self):
# Check that TimedeltaArray defers to Index on arithmetic ops
tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
tda = tdi.array
dti = pd.date_range("1999-12-31", periods=3, freq="D")
result = tda + dti
expected = tdi + dti
tm.assert_index_equal(result, expected)
result = tda + tdi
expected = tdi + tdi
tm.assert_index_equal(result, expected)
result = tda - tdi
expected = tdi - tdi
tm.assert_index_equal(result, expected)
def test_tda_add_dt64_object_array(self, box_with_array, tz_naive_fixture):
# Result should be cast back to DatetimeArray
box = box_with_array
dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
dti = dti._with_freq(None)
tdi = dti - dti
obj = tm.box_expected(tdi, box)
other = tm.box_expected(dti, box)
warn = None
if box is not pd.DataFrame or tz_naive_fixture is None:
warn = PerformanceWarning
with tm.assert_produces_warning(warn):
result = obj + other.astype(object)
tm.assert_equal(result, other)
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
def test_tdi_iadd_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
orig_rng = rng
rng += two_hours
tm.assert_equal(rng, expected)
if box_with_array is not pd.Index:
# Check that operation is actually inplace
tm.assert_equal(orig_rng, expected)
def test_tdi_isub_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
orig_rng = rng
rng -= two_hours
tm.assert_equal(rng, expected)
if box_with_array is not pd.Index:
# Check that operation is actually inplace
tm.assert_equal(orig_rng, expected)
# -------------------------------------------------------------
def test_tdi_ops_attributes(self):
rng = timedelta_range("2 days", periods=5, freq="2D", name="x")
result = rng + 1 * rng.freq
exp = timedelta_range("4 days", periods=5, freq="2D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
result = rng - 2 * rng.freq
exp = timedelta_range("-2 days", periods=5, freq="2D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
result = rng * 2
exp = timedelta_range("4 days", periods=5, freq="4D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "4D"
result = rng / 2
exp = timedelta_range("1 days", periods=5, freq="D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "D"
result = -rng
exp = timedelta_range("-2 days", periods=5, freq="-2D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "-2D"
rng = pd.timedelta_range("-2 days", periods=5, freq="D", name="x")
result = abs(rng)
exp = TimedeltaIndex(
["2 days", "1 days", "0 days", "1 days", "2 days"], name="x"
)
tm.assert_index_equal(result, exp)
assert result.freq is None
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(["24658 days 11:15:00", "NaT"])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
# TODO: Make raised error message more informative and test
with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):
pd.to_timedelta(106580, "D") + Timestamp("2000")
with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):
Timestamp("2000") + pd.to_timedelta(106580, "D")
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], "D") + Timestamp("2000")
with pytest.raises(OverflowError, match=msg):
Timestamp("2000") + pd.to_timedelta([106580], "D")
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta("1 days")
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(["5 days", _NaT]) - Timedelta("1 days")
with pytest.raises(OverflowError, match=msg):
(
pd.to_timedelta([_NaT, "5 days", "1 hours"])
- pd.to_timedelta(["7 seconds", _NaT, "4 hours"])
)
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta("1 days")
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(["4 days", pd.NaT])
result = pd.to_timedelta(["5 days", pd.NaT]) - Timedelta("1 days")
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, "5 hours"])
result = pd.to_timedelta([pd.NaT, "5 days", "1 hours"]) + pd.to_timedelta(
["7 seconds", pd.NaT, "4 hours"]
)
tm.assert_index_equal(result, exp)
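# Standalone illustration (not a test) of the NaT masking behaviour checked above:
#   >>> pd.to_timedelta(["5 days", pd.NaT]) - pd.Timedelta("1 days")
#   TimedeltaIndex(['4 days', NaT], dtype='timedelta64[ns]', freq=None)
# NaT entries propagate through the arithmetic instead of triggering the
# int64 overflow check.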
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(["00:00:01"]))
s2 = pd.to_timedelta(Series(["00:00:02"]))
msg = r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]"
with pytest.raises(TypeError, match=msg):
# Passing datetime64-dtype data to TimedeltaIndex is no longer
# supported GH#29794
pd.to_timedelta(Series([pd.NaT]))
sn = pd.to_timedelta(Series([pd.NaT], dtype="m8[ns]"))
df1 = pd.DataFrame(["00:00:01"]).apply(pd.to_timedelta)
df2 = pd.DataFrame(["00:00:02"]).apply(pd.to_timedelta)
with pytest.raises(TypeError, match=msg):
# Passing datetime64-dtype data to TimedeltaIndex is no longer
# supported GH#29794
pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
dfn = pd.DataFrame([pd.NaT.value]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta("00:00:01")
scalar2 = pd.to_timedelta("00:00:02")
timedelta_NaT = pd.to_timedelta("NaT")
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
s1 + np.nan
with pytest.raises(TypeError, match=msg):
np.nan + s1
with pytest.raises(TypeError, match=msg):
s1 - np.nan
with pytest.raises(TypeError, match=msg):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
msg = "cannot subtract a datelike from|unsupported operand type"
with pytest.raises(TypeError, match=msg):
df1 + np.nan
with pytest.raises(TypeError, match=msg):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range("2012-1-1", periods=3, freq="D")
v2 = pd.date_range("2012-1-2", periods=3, freq="D")
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24, rs.index).astype("int64").astype("timedelta64[ns]")
tm.assert_series_equal(rs, xp)
assert rs.dtype == "timedelta64[ns]"
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
# series on the rhs
result = df["A"] - df["A"].shift()
assert result.dtype == "timedelta64[ns]"
result = df["A"] + td
assert result.dtype == "M8[ns]"
# scalar Timestamp on rhs
maxa = df["A"].max()
assert isinstance(maxa, Timestamp)
resultb = df["A"] - df["A"].max()
assert resultb.dtype == "timedelta64[ns]"
# timestamp on lhs
result = resultb + df["A"]
values = [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
expected = Series(values, name="A")
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df["A"] - datetime(2001, 1, 1)
expected = Series([timedelta(days=4017 + i) for i in range(3)], name="A")
tm.assert_series_equal(result, expected)
assert result.dtype == "m8[ns]"
d = datetime(2001, 1, 1, 3, 4)
resulta = df["A"] - d
assert resulta.dtype == "m8[ns]"
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df["A"], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df["A"] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df["A"])
assert resultb.dtype == "M8[ns]"
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df["A"] + td
resultb = resulta - td
tm.assert_series_equal(df["A"], resultb)
assert resultb.dtype == "M8[ns]"
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta("1s")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta)
tm.assert_series_equal(
timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
)
# addition
tm.assert_series_equal(
nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
)
tm.assert_series_equal(
NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
tm.assert_series_equal(
timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
)
tm.assert_series_equal(
NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timedelta,
nat_series_dtype_timedelta,
)
# multiplication
tm.assert_series_equal(
nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta
)
tm.assert_series_equal(
1.0 * nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta("1.5s")]))
tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta("1.5s")]))
tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta("0.5s")]))
tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")]))
tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(["1 day", "2 day"])
idx = tm.box_expected(idx, box_with_array)
msg = (
"cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation"
)
with pytest.raises(TypeError, match=msg):
idx - Timestamp("2011-01-01")
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp("2011-01-01", tz=tz)
idx = TimedeltaIndex(["1 day", "2 day"])
expected = DatetimeIndex(["2011-01-02", "2011-01-03"], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"ts",
[
Timestamp("2012-01-01"),
Timestamp("2012-01-01").to_pydatetime(),
Timestamp("2012-01-01").to_datetime64(),
],
)
def test_td64arr_add_sub_datetimelike_scalar(self, ts, box_with_array):
# GH#11925, GH#29558
tdi = timedelta_range("1 day", periods=3)
expected = pd.date_range("2012-01-02", periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range("2011-12-31", periods=3, freq="-1D")
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
tdarr - ts
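# Hedged summary of the scalar behaviour boxed up above (a sketch, not part of
# the test): Timestamp("2012-01-01") + Timedelta("1 day") -> Timestamp("2012-01-02"),
# Timestamp("2012-01-01") - Timedelta("1 day") -> Timestamp("2011-12-31"),
# while Timedelta("1 day") - Timestamp("2012-01-01") raises TypeError, matching
# the "cannot subtract a datelike" message asserted here.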
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
msg = "cannot subtract a datelike from"
with pytest.raises(TypeError, match=msg):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64("NaT")
tdi = timedelta_range("1 day", periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Invalid __add__/__sub__ operations
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("tdi_freq", [None, "H"])
def test_td64arr_sub_periodlike(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
dti = Timestamp("2018-03-07 17:16:40") + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
msg = "cannot subtract|unsupported operand type"
with pytest.raises(TypeError, match=msg):
tdi - pi
# GH#13078 subtraction of Period scalar not supported
with pytest.raises(TypeError, match=msg):
tdi - pi[0]
@pytest.mark.parametrize(
"other",
[
# GH#12624 for str case
"a",
# GH#19123
1,
1.5,
np.array(2),
],
)
def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other):
# vector-like others are tested in test_td64arr_add_sub_numeric_arr_invalid
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdarr = tm.box_expected(tdser, box_with_array)
assert_invalid_addsub_type(tdarr, other)
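# Illustrative note (not executed): assert_invalid_addsub_type expects TypeError
# for every "other" parametrized above, e.g. a timedelta64 Series plus the
# string "a" or the scalar 1 has no defined meaning and must raise.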
@pytest.mark.parametrize(
"vec",
[
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3]),
DataFrame([[1, 2, 3]]),
],
ids=lambda x: type(x).__name__,
)
def test_td64arr_addsub_numeric_arr_invalid(
self, box_with_array, vec, any_real_dtype
):
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdarr = tm.box_expected(tdser, box_with_array)
vector = vec.astype(any_real_dtype)
assert_invalid_addsub_type(tdarr, vector)
def test_td64arr_add_sub_int(self, box_with_array, one):
# Variants of `one` for #19012, deprecated GH#22535
rng = timedelta_range("1 days 09:00:00", freq="H", periods=10)
tdarr = tm.box_expected(rng, box_with_array)
msg = "Addition/subtraction of integers"
assert_invalid_addsub_type(tdarr, one, msg)
# TODO: get inplace ops into assert_invalid_addsub_type
with pytest.raises(TypeError, match=msg):
tdarr += one
with pytest.raises(TypeError, match=msg):
tdarr -= one
def test_td64arr_add_sub_integer_array(self, box_with_array):
# GH#19959, deprecated GH#22535
# GH#22696 for DataFrame case, check that we don't dispatch to numpy
# implementation, which treats int64 as m8[ns]
rng = timedelta_range("1 days 09:00:00", freq="H", periods=3)
tdarr = tm.box_expected(rng, box_with_array)
other = tm.box_expected([4, 3, 2], box_with_array)
msg = "Addition/subtraction of integers and integer-arrays"
assert_invalid_addsub_type(tdarr, other, msg)
def test_td64arr_addsub_integer_array_no_freq(self, box_with_array):
# GH#19959
tdi = TimedeltaIndex(["1 Day", "NaT", "3 Hours"])
tdarr = tm.box_expected(tdi, box_with_array)
other = tm.box_expected([14, -1, 16], box_with_array)
msg = "Addition/subtraction of integers"
assert_invalid_addsub_type(tdarr, other, msg)
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_with_array):
box = box_with_array
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_with_array):
box = box_with_array
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
def test_td64arr_add_sub_tdi(self, box_with_array, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
box = box_with_array
if box is pd.DataFrame and names[1] != names[0]:
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["0 days", "1 day"], name=names[0])
tdi = np.array(tdi) if box is tm.to_array else tdi
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series(
[Timedelta(hours=3), Timedelta(days=1, hours=4)], name=names[2]
)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
result = ser + tdi
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
expected = Series(
[Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=names[2]
)
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
result = ser - tdi
tm.assert_equal(result, -expected)
assert_dtype(result, "timedelta64[ns]")
def test_td64arr_add_sub_td64_nat(self, box_with_array):
# GH#23320 special handling for timedelta64("NaT")
box = box_with_array
tdi = pd.TimedeltaIndex([NaT, Timedelta("1s")])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
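# Scalar sketch of the NaT propagation exercised above (assumption: the scalar
# mirrors the boxed behaviour): Timedelta("1s") + np.timedelta64("NaT") -> NaT,
# and likewise for subtraction in either order.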
def test_td64arr_sub_NaT(self, box_with_array):
# GH#18808
box = box_with_array
ser = Series([NaT, Timedelta("1s")])
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as + is now numeric
# GH#10699 for Tick cases
box = box_with_array
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
result = two_hours + rng
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as - is now numeric
# GH#10699 for Tick cases
box = box_with_array
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
result = two_hours - rng
tm.assert_equal(result, -expected)
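# Tick offsets behave like plain timedeltas in these two tests; e.g. (sketch,
# not executed) Timedelta("1 days") + pd.offsets.Hour(2) ->
# Timedelta("1 days 02:00:00"), which is the shift applied element-wise in the
# expected ranges above.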
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
def test_td64arr_add_offset_index(self, names, box_with_array):
# GH#18849, GH#19744
box = box_with_array
if box is pd.DataFrame and names[1] != names[0]:
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
other = np.array(other) if box is tm.to_array else other
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=names[2]
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_with_array):
# GH#18849
box = box_with_array
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer"
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
def test_td64arr_sub_offset_index(self, names, box_with_array):
# GH#18824, GH#19744
box = box_with_array
xbox = box if box is not tm.to_array else pd.Index
exname = names[2] if box is not tm.to_array else names[1]
if box is pd.DataFrame and names[1] != names[0]:
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
expected = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer"
)
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_with_offset_series(self, names, box_with_array):
# GH#18849
box = box_with_array
box2 = Series if box in [pd.Index, tm.to_array] else box
if box is pd.DataFrame:
# Since we are operating with a DataFrame and a non-DataFrame,
# the non-DataFrame is cast to Series and its name ignored.
exname = names[0]
elif box is tm.to_array:
exname = names[1]
else:
exname = names[2]
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))], name=exname)
obj = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = obj + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + obj
tm.assert_equal(res2, expected_add)
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))], name=exname)
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = obj - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize("obox", [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array):
# GH#18824
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
msg = "has incorrect type|cannot add the type MonthEnd"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
# ------------------------------------------------------------------
# Unsorted
def test_td64arr_add_sub_object_array(self, box_with_array):
tdi = pd.timedelta_range("1 day", periods=3, freq="D")
tdarr = tm.box_expected(tdi, box_with_array)
other = np.array(
[pd.Timedelta(days=1), pd.offsets.Day(2), pd.Timestamp("2000-01-04")]
)
with tm.assert_produces_warning(PerformanceWarning):
result = tdarr + other
expected = pd.Index(
[pd.Timedelta(days=2), pd.Timedelta(days=4), pd.Timestamp("2000-01-07")]
)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
msg = "unsupported operand type|cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
tdarr - other
with tm.assert_produces_warning(PerformanceWarning):
result = other - tdarr
expected = pd.Index(
[pd.Timedelta(0), pd.Timedelta(0), pd.Timestamp("2000-01-01")]
)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
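# Scalar analogue (sketch): Timedelta("1 day") * 2 -> Timedelta("2 days");
# multiplying by the integer 1 is a no-op, which is all this test asserts.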
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
msg = "argument must be an integer|cannot use operands with types dtype"
with pytest.raises(TypeError, match=msg):
rng * two_hours
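# Multiplying two timedelta-like values is undefined; the scalar analogue
# (sketch, not executed) Timedelta("1 day") * Timedelta("2 hours") raises
# TypeError, matching the message patterns above.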
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype="int64")
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype="int64"))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype="float64")
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize(
"other",
[
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11),
],
ids=lambda x: type(x).__name__,
)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(["1 Day"] * 10)
expected = timedelta_range("1 days", "10 days")
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError, match="unsupported operand type"):
rng / pd.NaT
with pytest.raises(TypeError, match="Cannot divide NaTType by"):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range("1 days", "10 days")
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64("NaT")
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match="Cannot divide"):
# GH#23829
1 / idx
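# GH#23829: dividing a number by a timedelta array is not defined, so 1 / idx
# must raise TypeError; only the forward direction (timedelta / number) works.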
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range("1 days", "10 days", name="foo")
expected = pd.Float64Index((np.arange(10) + 1) * 12, name="foo")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
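# Scalar sketch of the expected values (assumption: scalar division mirrors the
# boxed op): Timedelta("1 day") / Timedelta("2 hours") -> 12.0, hence the
# (np.arange(10) + 1) * 12 Float64Index above.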
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
def test_td64arr_div_td64_scalar(self, m, unit, box_with_array):
startdate = Series(pd.date_range("2013-01-01", "2013-01-03"))
enddate = Series(pd.date_range("2013-03-01", "2013-03-03"))
ser = enddate - startdate
ser[2] = np.nan
flat = ser
ser = tm.box_expected(ser, box_with_array)
# op
expected = Series([x / np.timedelta64(m, unit) for x in flat])
expected = tm.box_expected(expected, box_with_array)
result = ser / np.timedelta64(m, unit)
tm.assert_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in flat])
expected = tm.box_expected(expected, box_with_array)
result = np.timedelta64(m, unit) / ser
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array):
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
expected = pd.Float64Index([12, np.nan, 24], name="foo")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
expected = pd.Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
other = np.array([2, 4, 2], dtype="m8[h]")
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box_with_array)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box_with_array) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
msg = "Cannot divide vectors|Unable to coerce to Series"
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError, match=msg):
rng / other
with pytest.raises(ValueError, match=msg):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_td64arr_with_nat(self, box_with_array):
# GH#35529
box = box_with_array
left = pd.Series([1000, 222330, 30], dtype="timedelta64[ns]")
right = pd.Series([1000, 222330, None], dtype="timedelta64[ns]")
left = tm.box_expected(left, box)
right = tm.box_expected(right, box)
expected = np.array([1.0, 1.0, np.nan], dtype=np.float64)
expected = tm.box_expected(expected, box)
result = left // right
tm.assert_equal(result, expected)
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*"
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
tdi = timedelta_range("1 days", "10 days", name="foo")
expected = pd.Int64Index((np.arange(10) + 1) * 12, name="foo")
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize(
"scalar_td",
[
timedelta(minutes=10, seconds=7),
Timedelta("10m7s"),
Timedelta("10m7s").to_timedelta64(),
],
ids=lambda x: type(x).__name__,
)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
tdi = TimedeltaIndex(["00:05:03", "00:05:03", pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
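# Concrete scalar check mirroring the fixtures above (sketch, not executed):
# Timedelta("10m7s") // Timedelta("5m3s") -> 2 (hence the 2.0 entries), and
# Timedelta("5m3s") // Timedelta("10m7s") -> 0, with NaT propagating to NaN.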
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range("1 Day", "9 days")
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(["1 Day", "2 Days", "0 Days"] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
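# Scalar sketch of mod/divmod (assumption: three_days is a 3-day scalar, as the
# fixture name suggests): Timedelta("7 days") % Timedelta("3 days") ->
# Timedelta("1 days"), and divmod gives (2, Timedelta("1 days")).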
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range("1 ns", "10 ns", periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(["1 ns", "0 ns"] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
msg = "Cannot divide int by"
with pytest.raises(TypeError, match=msg):
2 % tdarr
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range("1 Day", "9 days")
tdarr = tm.box_expected(tdi, box_with_array)
expected = ["0 Days", "1 Day", "0 Days"] + ["3 Days"] * 6
expected = TimedeltaIndex(expected)
expected = tm.box_expected(expected, box_with_array)
result = three_days % tdarr
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = "operate|unsupported|cannot|not supported"
with pytest.raises(TypeError, match=pattern):
td1 * scalar_td
with pytest.raises(TypeError, match=pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
msg = (
"cannot use operands with types dtype|"
"Cannot multiply with unequal lengths|"
"Unable to coerce to Series"
)
with pytest.raises(TypeError, match=msg):
# length check before dtype check
idx * idx[:3]
with pytest.raises(ValueError, match=msg):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
msg = "cannot use operands with types dtype"
with pytest.raises(TypeError, match=msg):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(["118 Days", "118 Days", "NaT"], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser / two
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match="Cannot divide"):
two / tdser
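# Forward division by a numeric scalar scales the values, e.g. (sketch)
# Timedelta("59 Days") / 2 -> Timedelta("29 days 12:00:00"); the reversed
# operation number / timedelta is rejected, as asserted above.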
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
ids=lambda x: type(x).__name__,
)
def test_td64arr_rmul_numeric_array(self, box_with_array, vector, any_real_dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
vector = vector.astype(any_real_dtype)
expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser * vector
tm.assert_equal(result, expected)
result = vector * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
ids=lambda x: type(x).__name__,
)
def test_td64arr_div_numeric_array(self, box_with_array, vector, any_real_dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
vector = vector.astype(any_real_dtype)
expected = Series(["2.95D", "1D 23H 12m", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser / vector
tm.assert_equal(result, expected)
pattern = (
"true_divide'? cannot use operands|"
"cannot perform __div__|"
"cannot perform __truediv__|"
"unsupported operand|"
"Cannot divide"
)
with pytest.raises(TypeError, match=pattern):
vector / tdser
if not isinstance(vector, pd.Index):
# Index.__rdiv__ won't try to operate elementwise, just raises
result = tdser / vector.astype(object)
if box_with_array is pd.DataFrame:
expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))]
else:
expected = [tdser[n] / vector[n] for n in range(len(tdser))]
expected = pd.Index(expected) # do dtype inference
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match=pattern):
vector.astype(object) / tdser
def test_td64arr_mul_int_series(self, box_with_array, names, request):
# GH#19042 test for correct name attachment
box = box_with_array
if box_with_array is pd.DataFrame and names[2] is None:
reason = "broadcasts along wrong axis, but doesn't raise"
request.node.add_marker(pytest.mark.xfail(reason=reason))
exname = names[2] if box is not tm.to_array else names[1]
tdi = TimedeltaIndex(
["0days", "1day", "2days", "3days", "4days"], name=names[0]
)
# TODO: Should we be parametrizing over types for `ser` too?
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(
["0days", "1day", "4days", "9days", "16days"],
dtype="timedelta64[ns]",
name=exname,
)
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index or box is tm.to_array) else box
expected = tm.box_expected(expected, box)
result = ser * tdi
tm.assert_equal(result, expected)
# The direct operation tdi * ser still needs to be fixed.
result = ser.__rmul__(tdi)
if box is pd.DataFrame:
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
def test_float_series_rdiv_td64arr(self, box_with_array, names):
# GH#19042 test for correct name attachment
# TODO: the direct operation TimedeltaIndex / Series still
# needs to be fixed.
box = box_with_array
tdi = TimedeltaIndex(
["0days", "1day", "2days", "3days", "4days"], name=names[0]
)
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
xname = names[2] if box is not tm.to_array else names[1]
expected = Series(
[tdi[n] / ser[n] for n in range(len(ser))],
dtype="timedelta64[ns]",
name=xname,
)
xbox = box
if box in [pd.Index, tm.to_array] and type(ser) is Series:
xbox = Series
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = ser.__rdiv__(tdi)
if box is pd.DataFrame:
# TODO: Should we skip this case sooner or test something else?
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
class TestTimedelta64ArrayLikeArithmetic:
# Arithmetic tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all arithmetic
# tests will eventually end up here.
def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = "operate|unsupported|cannot|not supported"
with pytest.raises(TypeError, match=pattern):
scalar_td ** td1
with pytest.raises(TypeError, match=pattern):
td1 ** scalar_td
avg_line_length: 36.454418 | max_line_length: 88 | alphanum_fraction: 0.603327

hexsha: 9e4331cfa67eb9493fba31e3637cd13ef0864de5 | size: 56038 | ext: py | lang: Python
max_stars_repo_path: tests/function/test_record.py
max_stars_repo_name: vaenow/aliyun-datahub-sdk-python
max_stars_repo_head_hexsha: bcb82ad2f558ab34c3b767ad52c667b62c10e37e
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-07-07T08:15:19.000Z | max_stars_repo_stars_event_max_datetime: 2021-07-07T08:15:19.000Z
max_issues_repo_path: tests/function/test_record.py
max_issues_repo_name: vaenow/aliyun-datahub-sdk-python
max_issues_repo_head_hexsha: bcb82ad2f558ab34c3b767ad52c667b62c10e37e
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/function/test_record.py
max_forks_repo_name: vaenow/aliyun-datahub-sdk-python
max_forks_repo_head_hexsha: bcb82ad2f558ab34c3b767ad52c667b62c10e37e
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import os
import sys
import time
from six.moves import configparser
from datahub import DataHub
from datahub.exceptions import ResourceExistException, InvalidOperationException, InvalidParameterException, \
ResourceNotFoundException
from datahub.models import RecordSchema, FieldType, TupleRecord, BlobRecord, CursorType, CompressFormat
from datahub.utils import to_binary
current_path = os.path.split(os.path.realpath(__file__))[0]
root_path = os.path.join(current_path, '../..')
configer = configparser.ConfigParser()
configer.read(os.path.join(current_path, '../datahub.ini'))
access_id = configer.get('datahub', 'access_id')
access_key = configer.get('datahub', 'access_key')
endpoint = configer.get('datahub', 'endpoint')
print("=======================================")
print("access_id: %s" % access_id)
print("access_key: %s" % access_key)
print("endpoint: %s" % endpoint)
print("=======================================\n\n")
if not access_id or not access_key or not endpoint:
print("[access_id, access_key, endpoint] must be set in datahub.ini!")
sys.exit(-1)
dh = DataHub(access_id, access_key, endpoint, enable_pb=False)
dh_lz4 = DataHub(access_id, access_key, endpoint, enable_pb=False, compress_format=CompressFormat.LZ4)
dh_zlib = DataHub(access_id, access_key, endpoint, enable_pb=False, compress_format=CompressFormat.ZLIB)
dh_deflate = DataHub(access_id, access_key, endpoint, enable_pb=False, compress_format=CompressFormat.DEFLATE)
dh_pb = DataHub(access_id, access_key, endpoint, enable_pb=True)
dh_pb_lz4 = DataHub(access_id, access_key, endpoint, enable_pb=True, compress_format=CompressFormat.LZ4)
dh_pb_zlib = DataHub(access_id, access_key, endpoint, enable_pb=True, compress_format=CompressFormat.ZLIB)
dh_pb_deflate = DataHub(access_id, access_key, endpoint, enable_pb=True, compress_format=CompressFormat.DEFLATE)
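# The clients above cover the full matrix exercised below: JSON (enable_pb=False)
# and protobuf (enable_pb=True) serialization, each with no compression and with
# the LZ4, ZLIB and DEFLATE compress formats.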
def clean_topic(datahub_client, project_name, force=False):
topic_names = datahub_client.list_topic(project_name).topic_names
for topic_name in topic_names:
if force:
clean_subscription(datahub_client, project_name, topic_name)
datahub_client.delete_topic(project_name, topic_name)
def clean_project(datahub_client, force=False):
project_names = datahub_client.list_project().project_names
for project_name in project_names:
if force:
clean_topic(datahub_client, project_name)
try:
datahub_client.delete_project(project_name)
except InvalidOperationException:
pass
def clean_subscription(datahub_client, project_name, topic_name):
subscriptions = datahub_client.list_subscription(project_name, topic_name, '', 1, 100).subscriptions
for subscription in subscriptions:
datahub_client.delete_subscription(project_name, topic_name, subscription.sub_id)
class TestRecord:
def test_put_get_tuple_records(self):
project_name = "record_test_p%d_1" % int(time.time())
topic_name = "record_test_t%d_1" % int(time.time())
record_schema = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field', 'decimal_field'],
[FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP,
FieldType.DECIMAL],
[False, True, True, True, True, True])
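# Schema sketch: six fields typed BIGINT/STRING/DOUBLE/BOOLEAN/TIMESTAMP/DECIMAL.
# The third list passed to RecordSchema.from_lists appears to be the per-field
# allow-null flags (bigint_field required, the rest nullable) -- an assumption
# inferred from the None values written later in this test.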
print(TupleRecord(schema=record_schema))
try:
dh.create_project(project_name, '')
except ResourceExistException:
pass
# make sure the project will be deleted
try:
try:
dh.create_tuple_topic(project_name, topic_name, 3, 7, record_schema, '1')
dh.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put tuple records
failed_records = []
record = TupleRecord(schema=record_schema,
values=[99, 'yc1', 10.01, None, 1455869335000000,
decimal.Decimal('12.2219999999999995310417943983338773250579833984375')])
# write by partition key
record.partition_key = 'TestPartitionKey'
put_result = dh.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by hash key
record.hash_key = '4FFFFFFFFFFFFFFD7FFFFFFFFFFFFFFD'
put_result = dh.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by shard id
record.shard_id = '0'
record.put_attribute('AK', '47')
put_result = dh.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
record1 = TupleRecord(schema=record_schema)
record1.set_value('bigint_field', 2)
record1.set_value('string_field', 'yc2')
record1.set_value('double_field', None)
record1.set_value(3, False)
record1.set_value(4, 1455869335000011)
record1.set_value(5, decimal.Decimal('12.2219999999999995310417943983338773250579833984375'))
record1.attributes = {'key': 'value'}
record1.shard_id = '0'
put_result = dh.put_records(project_name, topic_name, [record1, record1, record1])
failed_records.extend(put_result.failed_records)
print(put_result)
print("put result: %s" % put_result)
print("failed records: %s" % put_result.failed_records)
print(failed_records)
assert len(failed_records) == 0
time.sleep(2)
# ======================= get record =======================
cursor = dh.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh.get_tuple_records(project_name, topic_name, '0', record_schema, cursor.cursor, 4)
print(record_result)
print(record_result.records[3])
assert record_result.record_count == 4
assert record_result.records[0].sequence == record_result.start_seq
assert record_result.records[1].sequence == record_result.start_seq + 1
assert record_result.records[2].sequence == record_result.start_seq + 2
finally:
clean_topic(dh, project_name)
dh.delete_project(project_name)
def test_put_get_tuple_records_lz4(self):
project_name = "record_test_p%d_1" % int(time.time())
topic_name = "record_test_t%d_1" % int(time.time())
record_schema = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field', 'decimal_field'],
[FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP,
FieldType.DECIMAL],
[False, True, True, True, True, True])
try:
dh_lz4.create_project(project_name, '')
except ResourceExistException:
pass
# make sure the project will be deleted
try:
try:
dh_lz4.create_tuple_topic(project_name, topic_name, 3, 7, record_schema, '1')
dh_lz4.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put tuple records
failed_records = []
record = TupleRecord(schema=record_schema,
values=[99, 'yc1', 10.01, True, 1455869335000000,
decimal.Decimal('12.2219999999999995310417943983338773250579833984375')])
# write by partition key
record.partition_key = 'TestPartitionKey'
put_result = dh_lz4.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by hash key
record.hash_key = '4FFFFFFFFFFFFFFD7FFFFFFFFFFFFFFD'
put_result = dh_lz4.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by shard id
record.shard_id = '0'
record.put_attribute('AK', '47')
put_result = dh_lz4.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
record1 = TupleRecord(schema=record_schema)
record1.set_value('bigint_field', 2)
record1.set_value('string_field', 'yc2')
record1.set_value('double_field', None)
record1.set_value(3, False)
record1.set_value(4, 1455869335000011)
record1.set_value(5, decimal.Decimal('12.2219999999999995310417943983338773250579833984375'))
record1.attributes = {'key': 'value'}
record1.shard_id = '0'
put_result = dh_lz4.put_records(project_name, topic_name, [record1, record1, record1])
failed_records.extend(put_result.failed_records)
print(put_result)
print("put result: %s" % put_result)
print("failed records: %s" % put_result.failed_records)
print(failed_records)
assert len(failed_records) == 0
# ======================= get record =======================
cursor = dh_lz4.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_lz4.get_tuple_records(project_name, topic_name, '0', record_schema, cursor.cursor, 4)
print(record_result)
print(record_result.records[3])
assert record_result.record_count == 4
assert record_result.records[0].sequence == record_result.start_seq
assert record_result.records[1].sequence == record_result.start_seq + 1
assert record_result.records[2].sequence == record_result.start_seq + 2
finally:
clean_topic(dh_lz4, project_name)
dh.delete_project(project_name)
def test_put_get_tuple_records_zlib(self):
project_name = "record_test_p%d_1" % int(time.time())
topic_name = "record_test_t%d_1" % int(time.time())
record_schema = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field', 'decimal_field'],
[FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP,
FieldType.DECIMAL],
[False, True, True, True, True, True])
try:
dh_zlib.create_project(project_name, '')
except ResourceExistException:
pass
# make sure the project will be deleted
try:
try:
dh_zlib.create_tuple_topic(project_name, topic_name, 3, 7, record_schema, '1')
dh_zlib.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put tuple records
failed_records = []
record = TupleRecord(schema=record_schema,
values=[99, 'yc1', 10.01, True, 1455869335000000,
decimal.Decimal('12.2219999999999995310417943983338773250579833984375')])
# write by partition key
record.partition_key = 'TestPartitionKey'
put_result = dh_zlib.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by hash key
record.hash_key = '4FFFFFFFFFFFFFFD7FFFFFFFFFFFFFFD'
put_result = dh_zlib.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by shard id
record.shard_id = '0'
record.put_attribute('AK', '47')
put_result = dh_zlib.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
record1 = TupleRecord(schema=record_schema)
record1.set_value('bigint_field', 2)
record1.set_value('string_field', 'yc2')
record1.set_value('double_field', None)
record1.set_value(3, False)
record1.set_value(4, 1455869335000011)
record1.set_value(5, decimal.Decimal('12.2219999999999995310417943983338773250579833984375'))
record1.attributes = {'key': 'value'}
record1.shard_id = '0'
put_result = dh_zlib.put_records(project_name, topic_name, [record1, record1, record1])
failed_records.extend(put_result.failed_records)
print(put_result)
print("put result: %s" % put_result)
print("failed records: %s" % put_result.failed_records)
print(failed_records)
assert len(failed_records) == 0
# ======================= get record =======================
cursor = dh_zlib.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_zlib.get_tuple_records(project_name, topic_name, '0', record_schema, cursor.cursor, 4)
print(record_result)
print(record_result.records[3])
assert record_result.record_count == 4
assert record_result.records[0].sequence == record_result.start_seq
assert record_result.records[1].sequence == record_result.start_seq + 1
assert record_result.records[2].sequence == record_result.start_seq + 2
finally:
clean_topic(dh_zlib, project_name)
dh.delete_project(project_name)
def test_put_get_tuple_records_deflate(self):
project_name = "record_test_p%d_1" % int(time.time())
topic_name = "record_test_t%d_1" % int(time.time())
record_schema = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field', 'decimal_field'],
[FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP,
FieldType.DECIMAL],
[False, True, True, True, True, True])
try:
dh_deflate.create_project(project_name, '')
except ResourceExistException:
pass
# make sure the project will be deleted
try:
try:
dh_deflate.create_tuple_topic(project_name, topic_name, 3, 7, record_schema, '1')
dh_deflate.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put tuple records
failed_records = []
record = TupleRecord(schema=record_schema,
values=[99, 'yc1', 10.01, True, 1455869335000000,
decimal.Decimal('12.2219999999999995310417943983338773250579833984375')])
# write by partition key
record.partition_key = 'TestPartitionKey'
put_result = dh_deflate.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by hash key
record.hash_key = '4FFFFFFFFFFFFFFD7FFFFFFFFFFFFFFD'
put_result = dh_deflate.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by shard id
record.shard_id = '0'
record.put_attribute('AK', '47')
put_result = dh_deflate.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
record1 = TupleRecord(schema=record_schema)
record1.set_value('bigint_field', 2)
record1.set_value('string_field', 'yc2')
record1.set_value('double_field', None)
record1.set_value(3, False)
record1.set_value(4, 1455869335000011)
record1.set_value(5, decimal.Decimal('12.2219999999999995310417943983338773250579833984375'))
record1.attributes = {'key': 'value'}
record1.shard_id = '0'
put_result = dh_deflate.put_records(project_name, topic_name, [record1, record1, record1])
failed_records.extend(put_result.failed_records)
print(put_result)
print("put result: %s" % put_result)
print("failed records: %s" % put_result.failed_records)
print(failed_records)
assert len(failed_records) == 0
# ======================= get record =======================
cursor = dh_deflate.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_deflate.get_tuple_records(project_name, topic_name, '0', record_schema, cursor.cursor, 4)
print(record_result)
print(record_result.records[3])
assert record_result.record_count == 4
assert record_result.records[0].sequence == record_result.start_seq
assert record_result.records[1].sequence == record_result.start_seq + 1
assert record_result.records[2].sequence == record_result.start_seq + 2
finally:
clean_topic(dh_deflate, project_name)
dh.delete_project(project_name)
def test_put_get_tuple_records_pb(self):
project_name = "record_test_p%d_1" % int(time.time())
topic_name = "record_test_t%d_1" % int(time.time())
record_schema = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field', 'decimal_field'],
[FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP,
FieldType.DECIMAL],
[False, True, True, True, True, True])
try:
dh_pb.create_project(project_name, '')
except ResourceExistException:
pass
# make sure the project will be deleted
try:
try:
dh_pb.create_tuple_topic(project_name, topic_name, 3, 7, record_schema, '1')
dh_pb.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put tuple records
failed_records = []
record = TupleRecord(schema=record_schema,
values=[99, 'yc1', 10.01, True, None,
decimal.Decimal('12.2219999999999995310417943983338773250579833984375')])
# write by partition key
record.partition_key = 'TestPartitionKey'
put_result = dh_pb.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by hash key
record.hash_key = '4FFFFFFFFFFFFFFD7FFFFFFFFFFFFFFD'
put_result = dh_pb.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by shard id
record.shard_id = '0'
record.put_attribute('AK', '47')
put_result = dh_pb.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# test failed records
record1 = TupleRecord(schema=record_schema)
record1.values = [99, 'yc1', 10.01, True, 1455869335000000,
decimal.Decimal('12.2219999999999995310417943983338773250579833984375')]
record1.shard_id = '-1'
record1.put_attribute('a', 'b')
put_result = dh_pb.put_records(project_name, topic_name, [record1, record1, record1])
failed_records.extend(put_result.failed_records)
print(put_result)
print("put result: %s" % put_result)
print("failed records: %s" % put_result.failed_records)
print(failed_records)
assert len(failed_records) == 3
for i in range(0, 3):
assert failed_records[i].error_code == 'InvalidShardId'
assert failed_records[i].error_message == 'Invalid shard id: -1'
# ======================= get record =======================
cursor = dh_pb.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_pb.get_tuple_records(project_name, topic_name, '0', record_schema, cursor.cursor, 6)
print(record_result)
assert record_result.record_count == 2
assert record_result.records[0].values == record.values
assert record_result.records[0].sequence == record_result.start_seq
assert record_result.records[1].sequence == record_result.start_seq + 1
finally:
clean_topic(dh_pb, project_name)
dh_pb.delete_project(project_name)
def test_put_get_tuple_records_pb_lz4(self):
project_name = "record_test_p%d_1" % int(time.time())
topic_name = "record_test_t%d_1" % int(time.time())
record_schema = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field'],
[FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP])
try:
dh_pb_lz4.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_pb_lz4.create_tuple_topic(project_name, topic_name, 3, 7, record_schema, '1')
dh_pb_lz4.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put tuple records
failed_records = []
record = TupleRecord(schema=record_schema, values=[99, 'yc1', 10.01, True, 1455869335000000])
# write by partition key
record.partition_key = 'TestPartitionKey'
put_result = dh_pb_lz4.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by hash key
record.hash_key = '4FFFFFFFFFFFFFFD7FFFFFFFFFFFFFFD'
put_result = dh_pb_lz4.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by shard id
record.shard_id = '0'
record.put_attribute('AK', '47')
put_result = dh_pb_lz4.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
record1 = TupleRecord(schema=record_schema)
record1.values = [99, 'yc1', 10.01, True, 1455869335000000]
record1.shard_id = '0'
record1.put_attribute('a', 'b')
put_result = dh_pb_lz4.put_records(project_name, topic_name, [record1, record1, record1])
failed_records.extend(put_result.failed_records)
print(put_result)
print("put result: %s" % put_result)
print("failed records: %s" % put_result.failed_records)
print(failed_records)
assert len(failed_records) == 0
# ======================= get record =======================
cursor = dh_pb_lz4.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_pb_lz4.get_tuple_records(project_name, topic_name, '0', record_schema, cursor.cursor, 3)
print(record_result)
assert record_result.record_count == 3
assert record_result.records[0].values == record.values
assert record_result.records[0].sequence == record_result.start_seq
assert record_result.records[1].sequence == record_result.start_seq + 1
assert record_result.records[2].sequence == record_result.start_seq + 2
finally:
clean_topic(dh_pb_lz4, project_name)
dh_pb_lz4.delete_project(project_name)
def test_put_get_tuple_records_pb_zlib(self):
project_name = "record_test_p%d_1" % int(time.time())
topic_name = "record_test_t%d_1" % int(time.time())
record_schema = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field'],
[FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP])
try:
dh_pb_zlib.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_pb_zlib.create_tuple_topic(project_name, topic_name, 3, 7, record_schema, '1')
dh_pb_zlib.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put tuple records
failed_records = []
record = TupleRecord(schema=record_schema, values=[99, 'yc1', 10.01, True, 1455869335000000])
# write by partition key
record.partition_key = 'TestPartitionKey'
put_result = dh_pb_zlib.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by hash key
record.hash_key = '4FFFFFFFFFFFFFFD7FFFFFFFFFFFFFFD'
put_result = dh_pb_zlib.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by shard id
record.shard_id = '0'
record.put_attribute('AK', '47')
put_result = dh_pb_zlib.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
record1 = TupleRecord(schema=record_schema)
record1.values = [99, 'yc1', 10.01, True, 1455869335000000]
record1.shard_id = '0'
record1.put_attribute('a', 'b')
put_result = dh_pb_zlib.put_records(project_name, topic_name, [record1, record1, record1])
failed_records.extend(put_result.failed_records)
print(put_result)
print("put result: %s" % put_result)
print("failed records: %s" % put_result.failed_records)
print(failed_records)
assert len(failed_records) == 0
# ======================= get record =======================
cursor = dh_pb_zlib.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_pb_zlib.get_tuple_records(project_name, topic_name, '0', record_schema, cursor.cursor, 3)
print(record_result)
assert record_result.record_count == 3
assert record_result.records[0].values == record.values
assert record_result.records[0].sequence == record_result.start_seq
assert record_result.records[1].sequence == record_result.start_seq + 1
assert record_result.records[2].sequence == record_result.start_seq + 2
finally:
clean_topic(dh_pb_zlib, project_name)
dh_pb_zlib.delete_project(project_name)
def test_put_get_tuple_records_pb_deflate(self):
project_name = "record_test_p%d_1" % int(time.time())
topic_name = "record_test_t%d_1" % int(time.time())
record_schema = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field'],
[FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP])
try:
dh_pb_deflate.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_pb_deflate.create_tuple_topic(project_name, topic_name, 3, 7, record_schema, '1')
dh_pb_deflate.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put tuple records
failed_records = []
record = TupleRecord(schema=record_schema, values=[99, 'yc1', 10.01, True, 1455869335000000])
# write by partition key
record.partition_key = 'TestPartitionKey'
put_result = dh_pb_deflate.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by hash key
record.hash_key = '4FFFFFFFFFFFFFFD7FFFFFFFFFFFFFFD'
put_result = dh_pb_deflate.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
# write by shard id
record.shard_id = '0'
record.put_attribute('AK', '47')
put_result = dh_pb_deflate.put_records(project_name, topic_name, [record])
failed_records.extend(put_result.failed_records)
record1 = TupleRecord(schema=record_schema)
record1.values = [99, 'yc1', 10.01, True, 1455869335000000]
record1.shard_id = '0'
record1.put_attribute('a', 'b')
put_result = dh_pb_deflate.put_records(project_name, topic_name, [record1, record1, record1])
failed_records.extend(put_result.failed_records)
print(put_result)
print("put result: %s" % put_result)
print("failed records: %s" % put_result.failed_records)
print(failed_records)
assert len(failed_records) == 0
# ======================= get record =======================
cursor = dh_pb_deflate.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_pb_deflate.get_tuple_records(project_name, topic_name, '0', record_schema, cursor.cursor,
3)
print(record_result)
assert record_result.record_count == 3
assert record_result.records[0].values == record.values
assert record_result.records[0].sequence == record_result.start_seq
assert record_result.records[1].sequence == record_result.start_seq + 1
assert record_result.records[2].sequence == record_result.start_seq + 2
finally:
clean_topic(dh_pb_deflate, project_name)
dh_pb_deflate.delete_project(project_name)
def test_put_get_blob_records(self):
project_name = "record_test_p%d_2" % int(time.time())
topic_name = "record_test_t%d_2" % int(time.time())
try:
dh.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh.create_blob_topic(project_name, topic_name, 3, 7, '')
dh.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put blob record
data = None
with open(os.path.join(root_path, 'tests/resources/datahub.png'), 'rb') as f:
data = f.read()
records = []
record0 = BlobRecord(blob_data=data)
record0.shard_id = '0'
record0.put_attribute('a', 'b')
records.append(record0)
failed_indices = (dh.put_records(project_name, topic_name, records)).failed_records
assert len(failed_indices) == 0
# ======================= get record =======================
cursor = dh.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh.get_blob_records(project_name, topic_name, '0', cursor.cursor, 3)
print(record_result)
assert record_result.record_count == 1
assert record_result.records[0].blob_data == data
finally:
clean_topic(dh, project_name)
dh.delete_project(project_name)
def test_put_get_blob_records_lz4(self):
project_name = "record_test_p%d_2" % int(time.time())
topic_name = "record_test_t%d_2" % int(time.time())
try:
dh_lz4.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_lz4.create_blob_topic(project_name, topic_name, 3, 7, '')
dh_lz4.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put blob record
data = None
with open(os.path.join(root_path, 'tests/resources/datahub.png'), 'rb') as f:
data = f.read()
records = []
record0 = BlobRecord(blob_data=data)
record0.shard_id = '0'
record0.put_attribute('a', 'b')
records.append(record0)
failed_indices = (dh_lz4.put_records(project_name, topic_name, records)).failed_records
assert len(failed_indices) == 0
# ======================= get record =======================
cursor = dh_lz4.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_lz4.get_blob_records(project_name, topic_name, '0', cursor.cursor, 3)
print(record_result)
assert record_result.record_count == 1
assert record_result.records[0].blob_data == data
finally:
clean_topic(dh_lz4, project_name)
dh_lz4.delete_project(project_name)
def test_put_get_blob_records_zlib(self):
project_name = "record_test_p%d_2" % int(time.time())
topic_name = "record_test_t%d_2" % int(time.time())
try:
dh_zlib.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_zlib.create_blob_topic(project_name, topic_name, 3, 7, '')
dh_zlib.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put blob record
data = None
with open(os.path.join(root_path, 'tests/resources/datahub.png'), 'rb') as f:
data = f.read()
records = []
record0 = BlobRecord(blob_data=data)
record0.shard_id = '0'
record0.put_attribute('a', 'b')
records.append(record0)
failed_indices = (dh_zlib.put_records(project_name, topic_name, records)).failed_records
assert len(failed_indices) == 0
# ======================= get record =======================
cursor = dh_zlib.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_zlib.get_blob_records(project_name, topic_name, '0', cursor.cursor, 3)
print(record_result)
assert record_result.record_count == 1
assert record_result.records[0].blob_data == data
finally:
clean_topic(dh_zlib, project_name)
dh_zlib.delete_project(project_name)
def test_put_get_blob_records_deflate(self):
project_name = "record_test_p%d_2" % int(time.time())
topic_name = "record_test_t%d_2" % int(time.time())
try:
dh_deflate.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_deflate.create_blob_topic(project_name, topic_name, 3, 7, '')
dh_deflate.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put blob record
data = None
with open(os.path.join(root_path, 'tests/resources/datahub.png'), 'rb') as f:
data = f.read()
records = []
record0 = BlobRecord(blob_data=data)
record0.shard_id = '0'
record0.put_attribute('a', 'b')
records.append(record0)
failed_indices = (dh_deflate.put_records(project_name, topic_name, records)).failed_records
assert len(failed_indices) == 0
# ======================= get record =======================
cursor = dh_deflate.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_deflate.get_blob_records(project_name, topic_name, '0', cursor.cursor, 3)
print(record_result)
assert record_result.record_count == 1
assert record_result.records[0].blob_data == data
finally:
clean_topic(dh_deflate, project_name)
dh_deflate.delete_project(project_name)
def test_put_get_blob_records_pb(self):
project_name = "record_test_p%d_2" % int(time.time())
topic_name = "record_test_t%d_2" % int(time.time())
try:
dh_pb.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_pb.create_blob_topic(project_name, topic_name, 3, 7, '')
dh_pb.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put blob record
data = None
with open(os.path.join(root_path, 'tests/resources/datahub.png'), 'rb') as f:
data = f.read()
records = []
record0 = BlobRecord(blob_data=data)
record0.shard_id = '0'
record0.put_attribute('a', 'b')
records.append(record0)
failed_indices = (dh_pb.put_records(project_name, topic_name, records)).failed_records
assert len(failed_indices) == 0
# ======================= get record =======================
cursor = dh_pb.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_pb.get_blob_records(project_name, topic_name, '0', cursor.cursor, 3)
print(record_result)
assert record_result.record_count == 1
assert record_result.records[0].blob_data == data
finally:
clean_topic(dh_pb, project_name)
dh_pb.delete_project(project_name)
def test_put_get_blob_records_pb_lz4(self):
project_name = "record_test_p%d_2" % int(time.time())
topic_name = "record_test_t%d_2" % int(time.time())
try:
dh_pb_lz4.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_pb_lz4.create_blob_topic(project_name, topic_name, 3, 7, '')
dh_pb_lz4.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put blob record
data = None
with open(os.path.join(root_path, 'tests/resources/datahub.png'), 'rb') as f:
data = f.read()
records = []
record0 = BlobRecord(blob_data=data)
record0.shard_id = '0'
record0.put_attribute('a', 'b')
records.append(record0)
failed_indices = (dh_pb_lz4.put_records(project_name, topic_name, records)).failed_records
assert len(failed_indices) == 0
# ======================= get record =======================
cursor = dh_pb_lz4.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_pb_lz4.get_blob_records(project_name, topic_name, '0', cursor.cursor, 3)
print(record_result)
assert record_result.record_count == 1
assert record_result.records[0].blob_data == data
finally:
clean_topic(dh_pb_lz4, project_name)
dh_pb_lz4.delete_project(project_name)
def test_put_get_blob_records_pb_zlib(self):
project_name = "record_test_p%d_2" % int(time.time())
topic_name = "record_test_t%d_2" % int(time.time())
try:
dh_pb_zlib.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_pb_zlib.create_blob_topic(project_name, topic_name, 3, 7, '')
dh_pb_zlib.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put blob record
data = None
with open(os.path.join(root_path, 'tests/resources/datahub.png'), 'rb') as f:
data = f.read()
records = []
record0 = BlobRecord(blob_data=data)
record0.shard_id = '0'
record0.put_attribute('a', 'b')
records.append(record0)
failed_indices = (dh_pb_zlib.put_records(project_name, topic_name, records)).failed_records
assert len(failed_indices) == 0
# ======================= get record =======================
cursor = dh_pb_zlib.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_pb_zlib.get_blob_records(project_name, topic_name, '0', cursor.cursor, 3)
print(record_result)
assert record_result.record_count == 1
assert record_result.records[0].blob_data == data
finally:
clean_topic(dh_pb_zlib, project_name)
dh_pb_zlib.delete_project(project_name)
def test_put_get_blob_records_pb_deflate(self):
project_name = "record_test_p%d_2" % int(time.time())
topic_name = "record_test_t%d_2" % int(time.time())
try:
dh_pb_deflate.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_pb_deflate.create_blob_topic(project_name, topic_name, 3, 7, '')
dh_pb_deflate.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put blob record
data = None
with open(os.path.join(root_path, 'tests/resources/datahub.png'), 'rb') as f:
data = f.read()
records = []
record0 = BlobRecord(blob_data=data)
record0.shard_id = '0'
record0.put_attribute('a', 'b')
records.append(record0)
failed_indices = (dh_pb_deflate.put_records(project_name, topic_name, records)).failed_records
assert len(failed_indices) == 0
# ======================= get record =======================
cursor = dh_pb_deflate.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_pb_deflate.get_blob_records(project_name, topic_name, '0', cursor.cursor, 3)
print(record_result)
assert record_result.record_count == 1
assert record_result.records[0].blob_data == data
finally:
clean_topic(dh_pb_deflate, project_name)
dh_pb_deflate.delete_project(project_name)
def test_put_tuple_records_by_shard_id_pb(self):
project_name = "record_test_p%d_3" % int(time.time())
topic_name = "record_test_t%d_3" % int(time.time())
record_schema = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field', 'decimal_field'],
[FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP,
FieldType.DECIMAL],
[False, True, True, True, True, True])
try:
dh_pb.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_pb.create_tuple_topic(project_name, topic_name, 3, 7, record_schema, '1')
dh_pb.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
record = TupleRecord(schema=record_schema,
values=[99, 'yc1', 10.01, True, 1455869335000000,
decimal.Decimal('12.2219999999999995310417943983338773250579833984375')])
dh_pb.put_records_by_shard(project_name, topic_name, "0", [record, record, record])
record1 = TupleRecord(schema=record_schema)
record1.set_value('bigint_field', 2)
record1.set_value('string_field', 'yc2')
record1.set_value('double_field', None)
record1.set_value(3, False)
record1.set_value(4, 1455869335000011)
record1.set_value(5, decimal.Decimal('12.2219999999999995310417943983338773250579833984375'))
record1.attributes = {'key': 'value'}
dh_pb.put_records_by_shard(project_name, topic_name, "0", [record1, record1, record1])
# ======================= get record =======================
time.sleep(1)
cursor = dh_pb.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_pb.get_tuple_records(project_name, topic_name, '0', record_schema, cursor.cursor, 6)
print(record_result)
assert record_result.record_count == 6
finally:
clean_topic(dh_pb, project_name)
dh_pb.delete_project(project_name)
def test_put_tuple_records_by_shard_id_pb_failed(self):
project_name = "record_test_p%d_4" % int(time.time())
topic_name = "record_test_t%d_4" % int(time.time())
record_schema = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field', 'decimal_field'],
[FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP,
FieldType.DECIMAL],
[False, True, True, True, True, True])
try:
dh_pb.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_pb.create_tuple_topic(project_name, topic_name, 3, 7, record_schema, '1')
dh_pb.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
record = TupleRecord(schema=record_schema,
values=[99, 'yc1', 10.01, True, 1455869335000000,
decimal.Decimal('12.2219999999999995310417943983338773250579833984375')])
wrong_record_schema = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field', 'decimal_field'],
[FieldType.STRING, FieldType.STRING, FieldType.STRING, FieldType.STRING, FieldType.STRING,
FieldType.STRING])
wrong_record_schema_2 = RecordSchema.from_lists(
['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field'],
[FieldType.STRING, FieldType.STRING, FieldType.STRING, FieldType.STRING, FieldType.STRING])
wrong_record = TupleRecord(schema=wrong_record_schema_2,
values=[99, 'yc1', 10.01, True, 1455869335000000])
wrong_record_2 = TupleRecord(schema=wrong_record_schema,
values=[99, 'yc1', 'a', True, 1455869335000000,
'12.2219999999999995310417943983338773250579833984375'])
wrong_record_3 = TupleRecord(schema=wrong_record_schema,
values=['99', 'yc1', '10.01', 'true', '253402271999000001', '12.12'])
wrong_record_4 = TupleRecord(schema=wrong_record_schema,
values=['99', 'a', '10.01', 'true', '1455869335000000', '12.12'])
wrong_record_5 = TupleRecord(schema=wrong_record_schema,
values=['99', 'a', '10.01', 'true', '1455869335000000', '-'])
# ======================= invalid shard id =======================
try:
dh_pb.put_records_by_shard(project_name, topic_name, "-1", [record])
except ResourceNotFoundException as e:
assert e.error_msg == 'ShardId Not Exist. Invalid shard id:' + project_name + '/' + topic_name + '/-1'
# ======================= field size not match =======================
try:
dh_pb.put_records_by_shard(project_name, topic_name, "0", [wrong_record])
except InvalidParameterException as e:
assert e.error_msg == 'Record field size not match'
# ======================= type error =======================
try:
dh_pb.put_records_by_shard(project_name, topic_name, "0", [wrong_record_2])
except InvalidParameterException as e:
assert e.error_msg == 'Cannot cast empty string to d'
# ======================= project not existed =======================
try:
dh_pb.put_records_by_shard('a', topic_name, "0", [record])
except InvalidParameterException as e:
assert e.error_msg == 'Project name is missing or invalid:a'
# ======================= topic not existed =======================
try:
dh_pb.put_records_by_shard(project_name, 'a', "0", [record])
except ResourceNotFoundException as e:
assert e.error_msg == 'The specified topic name does not exist.'
# ======================= invalid timestamp =======================
try:
dh_pb.put_records_by_shard(project_name, topic_name, "0", [wrong_record_3])
except InvalidParameterException as e:
assert e.error_msg == 'Timestamp field value over range: 253402271999000001'
# ======================= invalid string length =======================
try:
wrong_record_4._values[1] = 'a' * (1024 * 1024 + 1)
dh_pb.put_records_by_shard(project_name, topic_name, "0", [wrong_record_4])
except InvalidParameterException as e:
assert e.error_msg == 'String field length: 1048577 exceed max length: 1048576'
# ======================= invalid string length =======================
try:
dh_pb.put_records_by_shard(project_name, topic_name, "0", [wrong_record_5])
except InvalidParameterException as e:
assert e.error_msg == 'Decimal field invalid: -'
finally:
clean_topic(dh_pb, project_name)
dh_pb.delete_project(project_name)
def test_put_blob_records_by_shard_id_pb(self):
project_name = "record_test_p%d_4" % int(time.time())
topic_name = "record_test_t%d_4" % int(time.time())
try:
dh_pb.create_project(project_name, '')
except ResourceExistException:
pass
# make sure project will be deleted
try:
try:
dh_pb.create_blob_topic(project_name, topic_name, 3, 7, '')
dh_pb.wait_shards_ready(project_name, topic_name)
except ResourceExistException:
pass
# ======================= put record =======================
# put blob record
data = to_binary('blob data')
records = []
record0 = BlobRecord(blob_data=data)
record0.shard_id = '0'
record0.put_attribute('a', 'b')
records.append(record0)
dh_pb.put_records_by_shard(project_name, topic_name, "0", records)
# ======================= get record =======================
time.sleep(1)
cursor = dh_pb.get_cursor(project_name, topic_name, '0', CursorType.OLDEST)
record_result = dh_pb.get_blob_records(project_name, topic_name, '0', cursor.cursor, 3)
print(record_result)
assert record_result.record_count == 1
assert record_result.records[0].blob_data == data
finally:
clean_topic(dh_pb, project_name)
dh_pb.delete_project(project_name)
# run directly
if __name__ == '__main__':
test = TestRecord()
test.test_put_get_tuple_records()
test.test_put_get_tuple_records_lz4()
test.test_put_get_tuple_records_zlib()
test.test_put_get_tuple_records_deflate()
test.test_put_get_blob_records()
test.test_put_get_blob_records_lz4()
test.test_put_get_blob_records_zlib()
test.test_put_get_blob_records_deflate()
test.test_put_get_tuple_records_pb()
test.test_put_get_tuple_records_pb_lz4()
test.test_put_get_tuple_records_pb_zlib()
test.test_put_get_tuple_records_pb_deflate()
test.test_put_get_blob_records_pb()
test.test_put_get_blob_records_pb_lz4()
test.test_put_get_blob_records_pb_zlib()
test.test_put_get_blob_records_pb_deflate()
test.test_put_tuple_records_by_shard_id_pb()
test.test_put_tuple_records_by_shard_id_pb_failed()
test.test_put_blob_records_by_shard_id_pb()
| 44.228887 | 120 | 0.596149 |
b2637ce1eed7c44545f6b7758b9fc75c79aff502 | 1,085 | py | Python | saleor/discount/__init__.py | TysonRV/saleor | 83d3ca5ae163fa853ab311d0ebcdbc22f4788166 | ["BSD-3-Clause"] | 9 | 2021-08-08T22:42:18.000Z | 2021-11-23T06:50:14.000Z | saleor/discount/__init__.py | TysonRV/saleor | 83d3ca5ae163fa853ab311d0ebcdbc22f4788166 | ["BSD-3-Clause"] | 86 | 2018-03-08T14:19:19.000Z | 2018-05-12T14:55:16.000Z | saleor/discount/__init__.py | TysonRV/saleor | 83d3ca5ae163fa853ab311d0ebcdbc22f4788166 | ["BSD-3-Clause"] | 2 | 2018-01-25T06:09:07.000Z | 2018-01-25T20:55:34.000Z |
from django.conf import settings
from django.utils.translation import pgettext_lazy
class DiscountValueType:
FIXED = 'fixed'
PERCENTAGE = 'percentage'
CHOICES = [
(FIXED, pgettext_lazy(
'Discount type', settings.DEFAULT_CURRENCY)),
(PERCENTAGE, pgettext_lazy('Discount type', '%'))]
class VoucherType:
PRODUCT = 'product'
CATEGORY = 'category'
SHIPPING = 'shipping'
VALUE = 'value'
CHOICES = [
(VALUE, pgettext_lazy('Voucher: discount for', 'All purchases')),
(PRODUCT, pgettext_lazy('Voucher: discount for', 'One product')),
(CATEGORY, pgettext_lazy(
'Voucher: discount for', 'A category of products')),
(SHIPPING, pgettext_lazy('Voucher: discount for', 'Shipping'))]
class VoucherApplyToProduct:
ONE_PRODUCT = 'one'
ALL_PRODUCTS = 'all'
CHOICES = [
(ONE_PRODUCT, pgettext_lazy(
'Voucher application', 'Apply to a single item')),
(ALL_PRODUCTS, pgettext_lazy(
'Voucher application', 'Apply to all matching products'))]
| 28.552632 | 73 | 0.637788 |
18454d09c083e3df8dea1da759072ada3f80ca4c | 534 | py | Python | devListener.py | math2001/kpymap | 3f4aa8f3e82dbe936ad3446a3cf1e83a9a152208 | ["MIT"] | 1 | 2017-07-31T11:53:44.000Z | 2017-07-31T11:53:44.000Z | devListener.py | math2001/kpymap | 3f4aa8f3e82dbe936ad3446a3cf1e83a9a152208 | ["MIT"] | 17 | 2017-07-28T08:54:01.000Z | 2017-08-04T05:54:57.000Z | devListener.py | math2001/kpymap | 3f4aa8f3e82dbe936ad3446a3cf1e83a9a152208 | ["MIT"] | null | null | null |
# -*- encoding: utf-8 -*-
import sublime
import sublime_plugin
import os.path
class MLPDevListener(sublime_plugin.EventListener):
def on_post_save(self, view):
if not (os.path.dirname(__file__) in view.file_name() and
view.file_name().endswith('.py')):
return
sublime.run_command('reload_plugin', {
'main': os.path.join(sublime.packages_path(), 'kpymap',
'kpymap.py'),
'scripts': ['__init__'],
'quiet': True
})
| 29.666667 | 67 | 0.565543 |
1f8ba0e881146b7cccb996677cec3d4996867cf2 | 1,639 | py | Python | test/pyaz/vmss/encryption/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | ["MIT"] | null | null | null | test/pyaz/vmss/encryption/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | ["MIT"] | 9 | 2021-09-24T16:37:24.000Z | 2021-12-24T00:39:19.000Z | test/pyaz/vmss/encryption/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | ["MIT"] | null | null | null |
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def enable(resource_group, name, disk_encryption_keyvault, key_encryption_keyvault=None, key_encryption_key=None, key_encryption_algorithm=None, volume_type=None, force=None):
params = get_params(locals())
command = "az vmss encryption enable " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
    print(stdout)
    return json.loads(stdout)
else:
    print(stderr)
    raise Exception(stderr)
def disable(resource_group, name, volume_type=None, force=None):
params = get_params(locals())
command = "az vmss encryption disable " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
    print(stdout)
    return json.loads(stdout)
else:
    print(stderr)
    raise Exception(stderr)
def show(resource_group, name):
params = get_params(locals())
command = "az vmss encryption show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
    print(stdout)
    return json.loads(stdout)
else:
    print(stderr)
    raise Exception(stderr)
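# Hedged usage sketch (not part of the original module): one way these
# wrappers might be chained to enable disk encryption on a scale set and then
# read back its status.  The resource group, VMSS and key vault names are
# placeholders, and running this assumes a logged-in Azure CLI.
def _example_encryption_roundtrip():
    enable(resource_group="my-rg", name="my-vmss",
           disk_encryption_keyvault="my-keyvault", volume_type="ALL")
    return show(resource_group="my-rg", name="my-vmss")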
| 35.630435 | 175 | 0.680293 |
9ef1f3aa8cd3f3dc5c1ad18cc8d5ebb857eb78b0 | 6,704 | py | Python | servers/forms.py | devopsconsulting/webvirtmgr | cc35978cae4a1a53b93056df946896ae992fe176 | ["Apache-2.0"] | 1 | 2015-08-27T17:11:29.000Z | 2015-08-27T17:11:29.000Z | servers/forms.py | devopsconsulting/webvirtmgr | cc35978cae4a1a53b93056df946896ae992fe176 | ["Apache-2.0"] | null | null | null | servers/forms.py | devopsconsulting/webvirtmgr | cc35978cae4a1a53b93056df946896ae992fe176 | ["Apache-2.0"] | 1 | 2021-07-18T14:09:37.000Z | 2021-07-18T14:09:37.000Z |
import re
from django import forms
from django.utils.translation import ugettext_lazy as _
from servers.models import Compute
class ComputeAddTcpForm(forms.Form):
name = forms.CharField(error_messages={'required': _('No hostname has been entered')},
max_length=20)
hostname = forms.CharField(error_messages={'required': _('No IP / Domain name has been entered')},
max_length=100)
login = forms.CharField(error_messages={'required': _('No login has been entered')},
max_length=100)
password = forms.CharField(error_messages={'required': _('No password has been entered')},
max_length=100)
def clean_name(self):
name = self.cleaned_data['name']
have_symbol = re.match('[^a-zA-Z0-9._-]+', name)
if have_symbol:
raise forms.ValidationError(_('The host name must not contain any special characters'))
elif len(name) > 20:
raise forms.ValidationError(_('The host name must not exceed 20 characters'))
try:
Compute.objects.get(name=name)
except Compute.DoesNotExist:
return name
raise forms.ValidationError(_('This host is already connected'))
def clean_hostname(self):
hostname = self.cleaned_data['hostname']
have_symbol = re.match('[^a-z0-9.-]+', hostname)
wrong_ip = re.match('^0.|^255.', hostname)
if have_symbol:
raise forms.ValidationError(_('Hostname must contain only numbers, or the domain name separated by "."'))
elif wrong_ip:
raise forms.ValidationError(_('Wrong IP address'))
try:
Compute.objects.get(hostname=hostname)
except Compute.DoesNotExist:
return hostname
raise forms.ValidationError(_('This host is already connected'))
class ComputeAddSshForm(forms.Form):
name = forms.CharField(error_messages={'required': _('No hostname has been entered')},
max_length=20)
hostname = forms.CharField(error_messages={'required': _('No IP / Domain name has been entered')},
max_length=100)
login = forms.CharField(error_messages={'required': _('No login has been entered')},
max_length=20)
def clean_name(self):
name = self.cleaned_data['name']
have_symbol = re.match('[^a-zA-Z0-9._-]+', name)
if have_symbol:
raise forms.ValidationError(_('The name of the host must not contain any special characters'))
elif len(name) > 20:
raise forms.ValidationError(_('The name of the host must not exceed 20 characters'))
try:
Compute.objects.get(name=name)
except Compute.DoesNotExist:
return name
raise forms.ValidationError(_('This host is already connected'))
def clean_hostname(self):
hostname = self.cleaned_data['hostname']
have_symbol = re.match('[^a-zA-Z0-9._-]+', hostname)
wrong_ip = re.match('^0.|^255.', hostname)
if have_symbol:
raise forms.ValidationError(_('Hostname must contain only numbers, or the domain name separated by "."'))
elif wrong_ip:
raise forms.ValidationError(_('Wrong IP address'))
try:
Compute.objects.get(hostname=hostname)
except Compute.DoesNotExist:
return hostname
raise forms.ValidationError(_('This host is already connected'))
class ComputeAddTlsForm(forms.Form):
name = forms.CharField(error_messages={'required': _('No hostname has been entered')},
max_length=20)
hostname = forms.CharField(error_messages={'required': _('No IP / Domain name has been entered')},
max_length=100)
login = forms.CharField(error_messages={'required': _('No login has been entered')},
max_length=100)
password = forms.CharField(error_messages={'required': _('No password has been entered')},
max_length=100)
def clean_name(self):
name = self.cleaned_data['name']
have_symbol = re.match('[^a-zA-Z0-9._-]+', name)
if have_symbol:
raise forms.ValidationError(_('The host name must not contain any special characters'))
elif len(name) > 20:
raise forms.ValidationError(_('The host name must not exceed 20 characters'))
try:
Compute.objects.get(name=name)
except Compute.DoesNotExist:
return name
raise forms.ValidationError(_('This host is already connected'))
def clean_hostname(self):
hostname = self.cleaned_data['hostname']
have_symbol = re.match('[^a-z0-9.-]+', hostname)
wrong_ip = re.match('^0.|^255.', hostname)
if have_symbol:
raise forms.ValidationError(_('Hostname must contain only numbers, or the domain name separated by "."'))
elif wrong_ip:
raise forms.ValidationError(_('Wrong IP address'))
try:
Compute.objects.get(hostname=hostname)
except Compute.DoesNotExist:
return hostname
raise forms.ValidationError(_('This host is already connected'))
class ComputeEditHostForm(forms.Form):
host_id = forms.CharField()
name = forms.CharField(error_messages={'required': _('No hostname has been entered')},
max_length=20)
hostname = forms.CharField(error_messages={'required': _('No IP / Domain name has been entered')},
max_length=100)
login = forms.CharField(error_messages={'required': _('No login has been entered')},
max_length=100)
password = forms.CharField(max_length=100)
def clean_name(self):
name = self.cleaned_data['name']
have_symbol = re.match('[^a-zA-Z0-9._-]+', name)
if have_symbol:
raise forms.ValidationError(_('The name of the host must not contain any special characters'))
elif len(name) > 20:
raise forms.ValidationError(_('The name of the host must not exceed 20 characters'))
return name
def clean_hostname(self):
hostname = self.cleaned_data['hostname']
have_symbol = re.match('[^a-zA-Z0-9._-]+', hostname)
wrong_ip = re.match('^0.|^255.', hostname)
if have_symbol:
raise forms.ValidationError(_('Hostname must contain only numbers, or the domain name separated by "."'))
elif wrong_ip:
raise forms.ValidationError(_('Wrong IP address'))
return hostname
| 44.993289 | 117 | 0.615752 |
b8c26797532a5bb510d494cf8712a7d45d50ac25 | 3,853 | py | Python | sdk/python/pulumi_aws/ec2/ami_launch_permission.py | JakeGinnivan/pulumi-aws | c91ef78932964ac74eda7f5da81f65b0f1798c93 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/ec2/ami_launch_permission.py | JakeGinnivan/pulumi-aws | c91ef78932964ac74eda7f5da81f65b0f1798c93 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/ec2/ami_launch_permission.py | JakeGinnivan/pulumi-aws | c91ef78932964ac74eda7f5da81f65b0f1798c93 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class AmiLaunchPermission(pulumi.CustomResource):
account_id: pulumi.Output[str]
"""
An AWS Account ID to add launch permissions.
"""
image_id: pulumi.Output[str]
"""
A region-unique name for the AMI.
"""
def __init__(__self__, resource_name, opts=None, account_id=None, image_id=None, __props__=None, __name__=None, __opts__=None):
"""
Adds launch permission to Amazon Machine Image (AMI) from another AWS account.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2.AmiLaunchPermission("example",
account_id="123456789012",
image_id="ami-12345678")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_id: An AWS Account ID to add launch permissions.
:param pulumi.Input[str] image_id: A region-unique name for the AMI.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if account_id is None:
raise TypeError("Missing required property 'account_id'")
__props__['account_id'] = account_id
if image_id is None:
raise TypeError("Missing required property 'image_id'")
__props__['image_id'] = image_id
super(AmiLaunchPermission, __self__).__init__(
'aws:ec2/amiLaunchPermission:AmiLaunchPermission',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, account_id=None, image_id=None):
"""
Get an existing AmiLaunchPermission resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_id: An AWS Account ID to add launch permissions.
:param pulumi.Input[str] image_id: A region-unique name for the AMI.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["account_id"] = account_id
__props__["image_id"] = image_id
return AmiLaunchPermission(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
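# Hedged usage sketch (not part of the generated module): adopting an existing
# launch permission via the static get() lookup documented above.  The resource
# name, provider ID, account ID and AMI ID below are placeholder values.
def _example_lookup_existing():
    return AmiLaunchPermission.get("existing-permission",
                                   id="123456789012/ami-12345678",
                                   account_id="123456789012",
                                   image_id="ami-12345678")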
| 39.721649 | 134 | 0.660005 |
92c2d044eff1791afb63c82621f20beaa2482f46 | 51,569 | py | Python | intelprot/aardvark/aardvark_py.py | shuang4intel/intelprot | b89f26f3d586d0ea91dc5156ccf1cefc50e329f9 | ["Intel", "Apache-2.0"] | 1 | 2022-01-20T01:58:12.000Z | 2022-01-20T01:58:12.000Z | intelprot/aardvark/aardvark_py.py | shuang4intel/intelprot | b89f26f3d586d0ea91dc5156ccf1cefc50e329f9 | ["Intel", "Apache-2.0"] | null | null | null | intelprot/aardvark/aardvark_py.py | shuang4intel/intelprot | b89f26f3d586d0ea91dc5156ccf1cefc50e329f9 | ["Intel", "Apache-2.0"] | null | null | null |
#==========================================================================
# Aardvark Interface Library
#--------------------------------------------------------------------------
# Copyright (c) 2002-2020 Total Phase, Inc.
# All rights reserved.
# www.totalphase.com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# - Neither the name of Total Phase, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#--------------------------------------------------------------------------
# To access Aardvark devices through the API:
#
# 1) Use one of the following shared objects:
# aardvark.so -- Linux shared object
# aardvark.dll -- Windows dynamic link library
#
# 2) Along with one of the following language modules:
# aardvark.c/h -- C/C++ API header file and interface module
# aardvark_py.py -- Python API
# aardvark.bas -- Visual Basic 6 API
# aardvark.cs -- C# .NET source
# aardvark_net.dll -- Compiled .NET binding
#==========================================================================
#==========================================================================
# VERSION
#==========================================================================
AA_API_VERSION = 0x0532 # v5.50
AA_REQ_SW_VERSION = 0x050a # v5.10
#==========================================================================
# IMPORTS
#==========================================================================
import os
import struct
import sys
from array import array, ArrayType
def import_library ():
global api
import platform
ext = platform.system() == 'Windows' and '.dll' or '.so'
dir = os.path.dirname(os.path.abspath(__file__))
lib = os.path.join(dir, 'aardvark' + ext)
try:
if sys.version_info >= (3, 4):
from importlib.machinery import ExtensionFileLoader
from importlib.util import spec_from_file_location
from importlib.util import module_from_spec
loader = ExtensionFileLoader('aardvark', lib)
spec = spec_from_file_location('aardvark', loader=loader)
api = module_from_spec(spec)
spec.loader.exec_module(api)
else:
import imp
api = imp.load_dynamic('aardvark', lib)
except:
_, err, _ = sys.exc_info()
msg = 'Error while importing aardvark%s:\n%s' % (ext, err)
sys.exit(msg)
try:
import aardvark as api
except ImportError:
import_library()
del import_library
AA_SW_VERSION = 0x5000532 & 0xffff #api.py_version() & 0xffff
AA_REQ_API_VERSION = (0x5000532 >> 16) & 0xffff #(api.py_version() >> 16) & 0xffff
AA_LIBRARY_LOADED = \
((AA_SW_VERSION >= AA_REQ_SW_VERSION) and \
(AA_API_VERSION >= AA_REQ_API_VERSION))
#==========================================================================
# HELPER FUNCTIONS
#==========================================================================
def array_u08 (n): return array('B', [0]*n)
def array_u16 (n): return array('H', [0]*n)
def array_u32 (n): return array('I', [0]*n)
def array_u64 (n): return array('Q', [0]*n)   # 'Q' is the unsigned 64-bit typecode; 'K' is not a valid array typecode
def array_s08 (n): return array('b', [0]*n)
def array_s16 (n): return array('h', [0]*n)
def array_s32 (n): return array('i', [0]*n)
def array_s64 (n): return array('q', [0]*n)   # 'q' is the signed 64-bit typecode; 'L' is unsigned and platform-sized
def array_f32 (n): return array('f', [0]*n)
def array_f64 (n): return array('d', [0]*n)
#==========================================================================
# STATUS CODES
#==========================================================================
# All API functions return an integer which is the result of the
# transaction, or a status code if negative. The status codes are
# defined as follows:
# enum AardvarkStatus
# General codes (0 to -99)
AA_OK = 0
AA_UNABLE_TO_LOAD_LIBRARY = -1
AA_UNABLE_TO_LOAD_DRIVER = -2
AA_UNABLE_TO_LOAD_FUNCTION = -3
AA_INCOMPATIBLE_LIBRARY = -4
AA_INCOMPATIBLE_DEVICE = -5
AA_COMMUNICATION_ERROR = -6
AA_UNABLE_TO_OPEN = -7
AA_UNABLE_TO_CLOSE = -8
AA_INVALID_HANDLE = -9
AA_CONFIG_ERROR = -10
# I2C codes (-100 to -199)
AA_I2C_NOT_AVAILABLE = -100
AA_I2C_NOT_ENABLED = -101
AA_I2C_READ_ERROR = -102
AA_I2C_WRITE_ERROR = -103
AA_I2C_SLAVE_BAD_CONFIG = -104
AA_I2C_SLAVE_READ_ERROR = -105
AA_I2C_SLAVE_TIMEOUT = -106
AA_I2C_DROPPED_EXCESS_BYTES = -107
AA_I2C_BUS_ALREADY_FREE = -108
# SPI codes (-200 to -299)
AA_SPI_NOT_AVAILABLE = -200
AA_SPI_NOT_ENABLED = -201
AA_SPI_WRITE_ERROR = -202
AA_SPI_SLAVE_READ_ERROR = -203
AA_SPI_SLAVE_TIMEOUT = -204
AA_SPI_DROPPED_EXCESS_BYTES = -205
# GPIO codes (-400 to -499)
AA_GPIO_NOT_AVAILABLE = -400
# I2C bus monitor codes (-500 to -599)
AA_I2C_MONITOR_NOT_AVAILABLE = -500
AA_I2C_MONITOR_NOT_ENABLED = -501
#==========================================================================
# GENERAL TYPE DEFINITIONS
#==========================================================================
# Aardvark handle type definition
# typedef Aardvark => integer
# Deprecated type definitions.
#
# These are only for use with legacy code and
# should not be used for new development.
# typedef aa_u08 => integer
# typedef aa_u16 => integer
# typedef aa_u32 => integer
# typedef aa_s08 => integer
# typedef aa_s16 => integer
# typedef aa_s32 => integer
# Aardvark version matrix.
#
# This matrix describes the various version dependencies
# of Aardvark components. It can be used to determine
# which component caused an incompatibility error.
#
# All version numbers are of the format:
# (major << 8) | minor
#
# ex. v1.20 would be encoded as: 0x0114
class AardvarkVersion:
def __init__ (self):
# Software, firmware, and hardware versions.
self.software = 0
self.firmware = 0
self.hardware = 0
# Firmware requires that software must be >= this version.
self.sw_req_by_fw = 0
# Software requires that firmware must be >= this version.
self.fw_req_by_sw = 0
# Software requires that the API interface must be >= this version.
self.api_req_by_sw = 0
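# Hedged helper sketch (not part of the original API): formatting the packed
# (major << 8) | minor version words described above into a printable string,
# e.g. 0x0114 -> "1.20".
def _example_format_version (packed):
    return '%d.%02d' % ((packed >> 8) & 0xff, packed & 0xff)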
#==========================================================================
# GENERAL API
#==========================================================================
# Get a list of ports to which Aardvark devices are attached.
#
# nelem = maximum number of elements to return
# devices = array into which the port numbers are returned
#
# Each element of the array is written with the port number.
# Devices that are in-use are ORed with AA_PORT_NOT_FREE (0x8000).
#
# ex. devices are attached to ports 0, 1, 2
# ports 0 and 2 are available, and port 1 is in-use.
# array => 0x0000, 0x8001, 0x0002
#
# If the array is NULL, it is not filled with any values.
# If there are more devices than the array size, only the
# first nmemb port numbers will be written into the array.
#
# Returns the number of devices found, regardless of the
# array size.
AA_PORT_NOT_FREE = 0x8000
def aa_find_devices (devices):
"""usage: (int return, u16[] devices) = aa_find_devices(u16[] devices)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# devices pre-processing
__devices = isinstance(devices, int)
if __devices:
(devices, num_devices) = (array_u16(devices), devices)
else:
(devices, num_devices) = isinstance(devices, ArrayType) and (devices, len(devices)) or (devices[0], min(len(devices[0]), int(devices[1])))
if devices.typecode != 'H':
raise TypeError("type for 'devices' must be array('H')")
# Call API function
(_ret_) = api.py_aa_find_devices(num_devices, devices)
# devices post-processing
if __devices: del devices[max(0, min(_ret_, len(devices))):]
return (_ret_, devices)
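# Hedged usage sketch (not part of the original API): enumerating attached
# Aardvark ports and filtering out those flagged with AA_PORT_NOT_FREE, per
# the convention described above.  The 16-device upper bound is an arbitrary
# illustrative choice.
def _example_free_ports (max_devices = 16):
    result = aa_find_devices(max_devices)
    if not isinstance(result, tuple):
        return []   # a bare status code such as AA_INCOMPATIBLE_LIBRARY
    (count, ports) = result
    if count < 0:
        return []   # negative counts are status codes as well
    return [p for p in ports if not (p & AA_PORT_NOT_FREE)]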
# Get a list of ports to which Aardvark devices are attached.
#
# This function is the same as aa_find_devices() except that
# it returns the unique IDs of each Aardvark device. The IDs
# are guaranteed to be non-zero if valid.
#
# The IDs are the unsigned integer representation of the 10-digit
# serial numbers.
def aa_find_devices_ext (devices, unique_ids):
"""usage: (int return, u16[] devices, u32[] unique_ids) = aa_find_devices_ext(u16[] devices, u32[] unique_ids)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# devices pre-processing
__devices = isinstance(devices, int)
if __devices:
(devices, num_devices) = (array_u16(devices), devices)
else:
(devices, num_devices) = isinstance(devices, ArrayType) and (devices, len(devices)) or (devices[0], min(len(devices[0]), int(devices[1])))
if devices.typecode != 'H':
raise TypeError("type for 'devices' must be array('H')")
# unique_ids pre-processing
__unique_ids = isinstance(unique_ids, int)
if __unique_ids:
(unique_ids, num_ids) = (array_u32(unique_ids), unique_ids)
else:
(unique_ids, num_ids) = isinstance(unique_ids, ArrayType) and (unique_ids, len(unique_ids)) or (unique_ids[0], min(len(unique_ids[0]), int(unique_ids[1])))
if unique_ids.typecode != 'I':
raise TypeError("type for 'unique_ids' must be array('I')")
# Call API function
(_ret_) = api.py_aa_find_devices_ext(num_devices, num_ids, devices, unique_ids)
# devices post-processing
if __devices: del devices[max(0, min(_ret_, len(devices))):]
# unique_ids post-processing
if __unique_ids: del unique_ids[max(0, min(_ret_, len(unique_ids))):]
return (_ret_, devices, unique_ids)
# Open the Aardvark port.
#
# The port number is a zero-indexed integer.
#
# The port number is the same as that obtained from the
# aa_find_devices() function above.
#
# Returns an Aardvark handle, which is guaranteed to be
# greater than zero if it is valid.
#
# This function is recommended for use in simple applications
# where extended information is not required. For more complex
# applications, the use of aa_open_ext() is recommended.
def aa_open (port_number):
"""usage: Aardvark return = aa_open(int port_number)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_open(port_number)
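# Hedged usage sketch (not part of the original API): opening a port and
# turning a failed open into a readable error, using the handle > 0 validity
# convention described above.  Defaulting to port 0 is an assumption.
def _example_open_or_raise (port_number = 0):
    handle = aa_open(port_number)
    if handle <= 0:
        raise IOError('aa_open(%d) failed: %s' % (port_number, aa_status_string(handle)))
    return handle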
# Open the Aardvark port, returning extended information
# in the supplied structure. Behavior is otherwise identical
# to aa_open() above. If 0 is passed as the pointer to the
# structure, this function is exactly equivalent to aa_open().
#
# The structure is zeroed before the open is attempted.
# It is filled with whatever information is available.
#
# For example, if the firmware version is not filled, then
# the device could not be queried for its version number.
#
# This function is recommended for use in complex applications
# where extended information is required. For more simple
# applications, the use of aa_open() is recommended.
class AardvarkExt:
def __init__ (self):
# Version matrix
self.version = AardvarkVersion()
# Features of this device.
self.features = 0
def aa_open_ext (port_number):
"""usage: (Aardvark return, AardvarkExt aa_ext) = aa_open_ext(int port_number)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
(_ret_, c_aa_ext) = api.py_aa_open_ext(port_number)
# aa_ext post-processing
aa_ext = AardvarkExt()
(aa_ext.version.software, aa_ext.version.firmware, aa_ext.version.hardware, aa_ext.version.sw_req_by_fw, aa_ext.version.fw_req_by_sw, aa_ext.version.api_req_by_sw, aa_ext.features) = c_aa_ext
return (_ret_, aa_ext)
# Close the Aardvark port.
def aa_close (aardvark):
"""usage: int return = aa_close(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_close(aardvark)
# Return the port for this Aardvark handle.
#
# The port number is a zero-indexed integer.
def aa_port (aardvark):
"""usage: int return = aa_port(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_port(aardvark)
# Return the device features as a bit-mask of values, or
# an error code if the handle is not valid.
AA_FEATURE_SPI = 0x00000001
AA_FEATURE_I2C = 0x00000002
AA_FEATURE_GPIO = 0x00000008
AA_FEATURE_I2C_MONITOR = 0x00000010
def aa_features (aardvark):
"""usage: int return = aa_features(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_features(aardvark)
# Return the unique ID for this Aardvark adapter.
# IDs are guaranteed to be non-zero if valid.
# The ID is the unsigned integer representation of the
# 10-digit serial number.
def aa_unique_id (aardvark):
"""usage: u32 return = aa_unique_id(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_unique_id(aardvark)
# Return the status string for the given status code.
# If the code is not valid or the library function cannot
# be loaded, return a NULL string.
def aa_status_string (status):
"""usage: str return = aa_status_string(int status)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_status_string(status)
# Enable logging to a file. The handle must be standard file
# descriptor. In C, a file descriptor can be obtained by using
# the ANSI C function "open" or by using the function "fileno"
# on a FILE* stream. A FILE* stream can be obtained using "fopen"
# or can correspond to the common "stdout" or "stderr" --
# available when including stdlib.h
AA_LOG_STDOUT = 1
AA_LOG_STDERR = 2
def aa_log (aardvark, level, handle):
"""usage: int return = aa_log(Aardvark aardvark, int level, int handle)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_log(aardvark, level, handle)
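# --- Illustrative usage sketch; not part of the generated binding. ---
# aa_log() takes a raw file descriptor: use AA_LOG_STDOUT/AA_LOG_STDERR, or
# fileno() on an open Python file object.  The log level of 3 is an assumed
# example value; see the product datasheet for the levels that are defined.
def _example_log_to_file(aardvark, path="aardvark.log", level=3):
    log_file = open(path, "w")
    aa_log(aardvark, level, log_file.fileno())
    return log_file            # keep the file object alive while logging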
# Return the version matrix for the device attached to the
# given handle. If the handle is 0 or invalid, only the
# software and required api versions are set.
def aa_version (aardvark):
"""usage: (int return, AardvarkVersion version) = aa_version(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
(_ret_, c_version) = api.py_aa_version(aardvark)
# version post-processing
version = AardvarkVersion()
(version.software, version.firmware, version.hardware, version.sw_req_by_fw, version.fw_req_by_sw, version.api_req_by_sw) = c_version
return (_ret_, version)
# Configure the device by enabling/disabling I2C, SPI, and
# GPIO functions.
# enum AardvarkConfig
AA_CONFIG_GPIO_ONLY = 0x00
AA_CONFIG_SPI_GPIO = 0x01
AA_CONFIG_GPIO_I2C = 0x02
AA_CONFIG_SPI_I2C = 0x03
AA_CONFIG_QUERY = 0x80
AA_CONFIG_SPI_MASK = 0x00000001
AA_CONFIG_I2C_MASK = 0x00000002
def aa_configure (aardvark, config):
"""usage: int return = aa_configure(Aardvark aardvark, AardvarkConfig config)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_configure(aardvark, config)
# Configure the target power pins.
# This is only supported on hardware versions >= 2.00
AA_TARGET_POWER_NONE = 0x00
AA_TARGET_POWER_BOTH = 0x03
AA_TARGET_POWER_QUERY = 0x80
def aa_target_power (aardvark, power_mask):
"""usage: int return = aa_target_power(Aardvark aardvark, u08 power_mask)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_target_power(aardvark, power_mask)
# Sleep for the specified number of milliseconds
# Accuracy depends on the operating system scheduler
# Returns the number of milliseconds slept
def aa_sleep_ms (milliseconds):
"""usage: u32 return = aa_sleep_ms(u32 milliseconds)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_sleep_ms(milliseconds)
#==========================================================================
# ASYNC MESSAGE POLLING
#==========================================================================
# Polling function to check if there are any asynchronous
# messages pending for processing. The function takes a timeout
# value in units of milliseconds. If the timeout is < 0, the
# function will block until data is received. If the timeout is 0,
# the function will perform a non-blocking check.
AA_ASYNC_NO_DATA = 0x00000000
AA_ASYNC_I2C_READ = 0x00000001
AA_ASYNC_I2C_WRITE = 0x00000002
AA_ASYNC_SPI = 0x00000004
AA_ASYNC_I2C_MONITOR = 0x00000008
def aa_async_poll (aardvark, timeout):
"""usage: int return = aa_async_poll(Aardvark aardvark, int timeout)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_async_poll(aardvark, timeout)
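# --- Illustrative usage sketch; not part of the generated binding. ---
# One pass of an asynchronous polling loop: wait up to 500 ms and dispatch
# on the returned bit mask.  The handlers are placeholders only.
def _example_poll_once(aardvark, timeout_ms=500):
    pending = aa_async_poll(aardvark, timeout_ms)
    if pending < 0:
        return pending                   # ordinary error code
    if pending == AA_ASYNC_NO_DATA:
        return None                      # timeout expired with nothing queued
    if pending & AA_ASYNC_I2C_READ:
        pass                             # e.g. service it with aa_i2c_slave_read()
    if pending & AA_ASYNC_SPI:
        pass                             # e.g. service it with aa_spi_slave_read()
    return pending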
#==========================================================================
# I2C API
#==========================================================================
# Free the I2C bus.
def aa_i2c_free_bus (aardvark):
"""usage: int return = aa_i2c_free_bus(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_free_bus(aardvark)
# Set the I2C bit rate in kilohertz. If a zero is passed as the
# bitrate, the bitrate is unchanged and the current bitrate is
# returned.
def aa_i2c_bitrate (aardvark, bitrate_khz):
"""usage: int return = aa_i2c_bitrate(Aardvark aardvark, int bitrate_khz)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_bitrate(aardvark, bitrate_khz)
# Set the bus lock timeout. If a zero is passed as the timeout,
# the timeout is unchanged and the current timeout is returned.
def aa_i2c_bus_timeout (aardvark, timeout_ms):
"""usage: int return = aa_i2c_bus_timeout(Aardvark aardvark, u16 timeout_ms)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_bus_timeout(aardvark, timeout_ms)
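# --- Illustrative usage sketch; not part of the generated binding. ---
# Both setters follow the same convention: passing zero queries the current
# value without changing it, while a non-zero value is applied and the value
# actually set is returned.  400 kHz and 150 ms are example settings.
def _example_i2c_bus_setup(aardvark):
    previous_khz = aa_i2c_bitrate(aardvark, 0)        # query only
    actual_khz = aa_i2c_bitrate(aardvark, 400)        # request 400 kHz
    timeout_ms = aa_i2c_bus_timeout(aardvark, 150)    # 150 ms bus lock timeout
    return (previous_khz, actual_khz, timeout_ms)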
# enum AardvarkI2cFlags
AA_I2C_NO_FLAGS = 0x00
AA_I2C_10_BIT_ADDR = 0x01
AA_I2C_COMBINED_FMT = 0x02
AA_I2C_NO_STOP = 0x04
AA_I2C_SIZED_READ = 0x10
AA_I2C_SIZED_READ_EXTRA1 = 0x20
# Read a stream of bytes from the I2C slave device.
def aa_i2c_read (aardvark, slave_addr, flags, data_in):
"""usage: (int return, u08[] data_in) = aa_i2c_read(Aardvark aardvark, u16 slave_addr, AardvarkI2cFlags flags, u08[] data_in)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data_in pre-processing
__data_in = isinstance(data_in, int)
if __data_in:
(data_in, num_bytes) = (array_u08(data_in), data_in)
else:
(data_in, num_bytes) = isinstance(data_in, ArrayType) and (data_in, len(data_in)) or (data_in[0], min(len(data_in[0]), int(data_in[1])))
if data_in.typecode != 'B':
raise TypeError("type for 'data_in' must be array('B')")
# Call API function
(_ret_) = api.py_aa_i2c_read(aardvark, slave_addr, flags, num_bytes, data_in)
# data_in post-processing
if __data_in: del data_in[max(0, min(_ret_, len(data_in))):]
return (_ret_, data_in)
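# --- Illustrative usage sketch; not part of the generated binding. ---
# The three ways of passing 'data_in' described in the docstring above: a
# plain integer (the wrapper allocates the array), an array('B') object, or
# an (array, length) tuple.  The slave address 0x50 is an arbitrary example.
def _example_i2c_read_argument_forms(aardvark, slave_addr=0x50):
    # 1) integer: read up to 16 bytes into a freshly allocated array
    (count, data) = aa_i2c_read(aardvark, slave_addr, AA_I2C_NO_FLAGS, 16)
    # 2) caller-supplied array('B'); its full length is the requested read size
    buf = array_u08(16)
    (count, buf) = aa_i2c_read(aardvark, slave_addr, AA_I2C_NO_FLAGS, buf)
    # 3) (array, length) tuple: only the first 8 bytes of buf are requested
    (count, buf) = aa_i2c_read(aardvark, slave_addr, AA_I2C_NO_FLAGS, (buf, 8))
    return (count, buf)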
# enum AardvarkI2cStatus
AA_I2C_STATUS_OK = 0
AA_I2C_STATUS_BUS_ERROR = 1
AA_I2C_STATUS_SLA_ACK = 2
AA_I2C_STATUS_SLA_NACK = 3
AA_I2C_STATUS_DATA_NACK = 4
AA_I2C_STATUS_ARB_LOST = 5
AA_I2C_STATUS_BUS_LOCKED = 6
AA_I2C_STATUS_LAST_DATA_ACK = 7
# Read a stream of bytes from the I2C slave device.
# This API function returns the number of bytes read into
# the num_read variable. The return value of the function
# is a status code.
def aa_i2c_read_ext (aardvark, slave_addr, flags, data_in):
"""usage: (int return, u08[] data_in, u16 num_read) = aa_i2c_read_ext(Aardvark aardvark, u16 slave_addr, AardvarkI2cFlags flags, u08[] data_in)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data_in pre-processing
__data_in = isinstance(data_in, int)
if __data_in:
(data_in, num_bytes) = (array_u08(data_in), data_in)
else:
(data_in, num_bytes) = isinstance(data_in, ArrayType) and (data_in, len(data_in)) or (data_in[0], min(len(data_in[0]), int(data_in[1])))
if data_in.typecode != 'B':
raise TypeError("type for 'data_in' must be array('B')")
# Call API function
(_ret_, num_read) = api.py_aa_i2c_read_ext(aardvark, slave_addr, flags, num_bytes, data_in)
# data_in post-processing
if __data_in: del data_in[max(0, min(num_read, len(data_in))):]
return (_ret_, data_in, num_read)
# Write a stream of bytes to the I2C slave device.
def aa_i2c_write (aardvark, slave_addr, flags, data_out):
"""usage: int return = aa_i2c_write(Aardvark aardvark, u16 slave_addr, AardvarkI2cFlags flags, u08[] data_out)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data_out pre-processing
(data_out, num_bytes) = isinstance(data_out, ArrayType) and (data_out, len(data_out)) or (data_out[0], min(len(data_out[0]), int(data_out[1])))
if data_out.typecode != 'B':
raise TypeError("type for 'data_out' must be array('B')")
# Call API function
return api.py_aa_i2c_write(aardvark, slave_addr, flags, num_bytes, data_out)
# Write a stream of bytes to the I2C slave device.
# This API function returns the number of bytes written into
# the num_written variable. The return value of the function
# is a status code.
def aa_i2c_write_ext (aardvark, slave_addr, flags, data_out):
"""usage: (int return, u16 num_written) = aa_i2c_write_ext(Aardvark aardvark, u16 slave_addr, AardvarkI2cFlags flags, u08[] data_out)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data_out pre-processing
(data_out, num_bytes) = isinstance(data_out, ArrayType) and (data_out, len(data_out)) or (data_out[0], min(len(data_out[0]), int(data_out[1])))
if data_out.typecode != 'B':
raise TypeError("type for 'data_out' must be array('B')")
# Call API function
return api.py_aa_i2c_write_ext(aardvark, slave_addr, flags, num_bytes, data_out)
# Do an atomic write+read to an I2C slave device by first
# writing a stream of bytes to the I2C slave device and then
# reading a stream of bytes back from the same slave device.
# This API function returns the number of bytes written into
# the num_written variable and the number of bytes read into
# the num_read variable. The return value of the function is
# the status given as (read_status << 8) | (write_status).
def aa_i2c_write_read (aardvark, slave_addr, flags, out_data, in_data):
"""usage: (int return, u16 num_written, u08[] in_data, u16 num_read) = aa_i2c_write_read(Aardvark aardvark, u16 slave_addr, AardvarkI2cFlags flags, u08[] out_data, u08[] in_data)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# out_data pre-processing
(out_data, out_num_bytes) = isinstance(out_data, ArrayType) and (out_data, len(out_data)) or (out_data[0], min(len(out_data[0]), int(out_data[1])))
if out_data.typecode != 'B':
raise TypeError("type for 'out_data' must be array('B')")
# in_data pre-processing
__in_data = isinstance(in_data, int)
if __in_data:
(in_data, in_num_bytes) = (array_u08(in_data), in_data)
else:
(in_data, in_num_bytes) = isinstance(in_data, ArrayType) and (in_data, len(in_data)) or (in_data[0], min(len(in_data[0]), int(in_data[1])))
if in_data.typecode != 'B':
raise TypeError("type for 'in_data' must be array('B')")
# Call API function
(_ret_, num_written, num_read) = api.py_aa_i2c_write_read(aardvark, slave_addr, flags, out_num_bytes, out_data, in_num_bytes, in_data)
# in_data post-processing
if __in_data: del in_data[max(0, min(num_read, len(in_data))):]
return (_ret_, num_written, in_data, num_read)
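# --- Illustrative usage sketch; not part of the generated binding. ---
# The combined status returned by aa_i2c_write_read() packs two
# AardvarkI2cStatus codes as (read_status << 8) | write_status.  Negative
# return values are ordinary error codes and should be handled separately.
def _example_split_write_read_status(combined_status):
    write_status = combined_status & 0xff
    read_status = (combined_status >> 8) & 0xff
    return (write_status, read_status)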
# Enable/Disable the Aardvark as an I2C slave device
def aa_i2c_slave_enable (aardvark, addr, maxTxBytes, maxRxBytes):
"""usage: int return = aa_i2c_slave_enable(Aardvark aardvark, u08 addr, u16 maxTxBytes, u16 maxRxBytes)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_slave_enable(aardvark, addr, maxTxBytes, maxRxBytes)
def aa_i2c_slave_disable (aardvark):
"""usage: int return = aa_i2c_slave_disable(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_slave_disable(aardvark)
# Set the slave response in the event the Aardvark is put
# into slave mode and contacted by a Master.
def aa_i2c_slave_set_response (aardvark, data_out):
"""usage: int return = aa_i2c_slave_set_response(Aardvark aardvark, u08[] data_out)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data_out pre-processing
(data_out, num_bytes) = isinstance(data_out, ArrayType) and (data_out, len(data_out)) or (data_out[0], min(len(data_out[0]), int(data_out[1])))
if data_out.typecode != 'B':
raise TypeError("type for 'data_out' must be array('B')")
# Call API function
return api.py_aa_i2c_slave_set_response(aardvark, num_bytes, data_out)
# Return number of bytes written from a previous
# Aardvark->I2C_master transmission. Since the transmission is
# happening asynchronously with respect to the PC host
# software, there could be responses queued up from many
# previous write transactions.
def aa_i2c_slave_write_stats (aardvark):
"""usage: int return = aa_i2c_slave_write_stats(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_slave_write_stats(aardvark)
# Read the bytes from an I2C slave reception
def aa_i2c_slave_read (aardvark, data_in):
"""usage: (int return, u08 addr, u08[] data_in) = aa_i2c_slave_read(Aardvark aardvark, u08[] data_in)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data_in pre-processing
__data_in = isinstance(data_in, int)
if __data_in:
(data_in, num_bytes) = (array_u08(data_in), data_in)
else:
(data_in, num_bytes) = isinstance(data_in, ArrayType) and (data_in, len(data_in)) or (data_in[0], min(len(data_in[0]), int(data_in[1])))
if data_in.typecode != 'B':
raise TypeError("type for 'data_in' must be array('B')")
# Call API function
(_ret_, addr) = api.py_aa_i2c_slave_read(aardvark, num_bytes, data_in)
# data_in post-processing
if __data_in: del data_in[max(0, min(_ret_, len(data_in))):]
return (_ret_, addr, data_in)
# Extended functions that return status code
def aa_i2c_slave_write_stats_ext (aardvark):
"""usage: (int return, u16 num_written) = aa_i2c_slave_write_stats_ext(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_slave_write_stats_ext(aardvark)
def aa_i2c_slave_read_ext (aardvark, data_in):
"""usage: (int return, u08 addr, u08[] data_in, u16 num_read) = aa_i2c_slave_read_ext(Aardvark aardvark, u08[] data_in)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data_in pre-processing
__data_in = isinstance(data_in, int)
if __data_in:
(data_in, num_bytes) = (array_u08(data_in), data_in)
else:
(data_in, num_bytes) = isinstance(data_in, ArrayType) and (data_in, len(data_in)) or (data_in[0], min(len(data_in[0]), int(data_in[1])))
if data_in.typecode != 'B':
raise TypeError("type for 'data_in' must be array('B')")
# Call API function
(_ret_, addr, num_read) = api.py_aa_i2c_slave_read_ext(aardvark, num_bytes, data_in)
# data_in post-processing
if __data_in: del data_in[max(0, min(num_read, len(data_in))):]
return (_ret_, addr, data_in, num_read)
# Enable the I2C bus monitor
# This disables all other functions on the Aardvark adapter
def aa_i2c_monitor_enable (aardvark):
"""usage: int return = aa_i2c_monitor_enable(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_monitor_enable(aardvark)
# Disable the I2C bus monitor
def aa_i2c_monitor_disable (aardvark):
"""usage: int return = aa_i2c_monitor_disable(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_monitor_disable(aardvark)
# Read the data collected by the bus monitor
AA_I2C_MONITOR_DATA = 0x00ff
AA_I2C_MONITOR_NACK = 0x0100
AA_I2C_MONITOR_CMD_START = 0xff00
AA_I2C_MONITOR_CMD_STOP = 0xff01
def aa_i2c_monitor_read (aardvark, data):
"""usage: (int return, u16[] data) = aa_i2c_monitor_read(Aardvark aardvark, u16[] data)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data pre-processing
__data = isinstance(data, int)
if __data:
(data, num_bytes) = (array_u16(data), data)
else:
(data, num_bytes) = isinstance(data, ArrayType) and (data, len(data)) or (data[0], min(len(data[0]), int(data[1])))
if data.typecode != 'H':
raise TypeError("type for 'data' must be array('H')")
# Call API function
(_ret_) = api.py_aa_i2c_monitor_read(aardvark, num_bytes, data)
# data post-processing
if __data: del data[max(0, min(_ret_, len(data))):]
return (_ret_, data)
# Configure the I2C pullup resistors.
# This is only supported on hardware versions >= 2.00
AA_I2C_PULLUP_NONE = 0x00
AA_I2C_PULLUP_BOTH = 0x03
AA_I2C_PULLUP_QUERY = 0x80
def aa_i2c_pullup (aardvark, pullup_mask):
"""usage: int return = aa_i2c_pullup(Aardvark aardvark, u08 pullup_mask)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_pullup(aardvark, pullup_mask)
#==========================================================================
# SPI API
#==========================================================================
# Set the SPI bit rate in kilohertz. If a zero is passed as the
# bitrate, the bitrate is unchanged and the current bitrate is
# returned.
def aa_spi_bitrate (aardvark, bitrate_khz):
"""usage: int return = aa_spi_bitrate(Aardvark aardvark, int bitrate_khz)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_spi_bitrate(aardvark, bitrate_khz)
# These configuration parameters specify how to clock the
# bits that are sent and received on the Aardvark SPI
# interface.
#
# The polarity option specifies which transition
# constitutes the leading edge and which transition is the
# falling edge. For example, AA_SPI_POL_RISING_FALLING
# would configure the SPI to idle the SCK clock line low.
# The clock would then transition low-to-high on the
# leading edge and high-to-low on the trailing edge.
#
# The phase option determines whether to sample or setup on
# the leading edge. For example, AA_SPI_PHASE_SAMPLE_SETUP
# would configure the SPI to sample on the leading edge and
# setup on the trailing edge.
#
# The bitorder option is used to indicate whether LSB or
# MSB is shifted first.
#
# See the diagrams in the Aardvark datasheet for
# more details.
# enum AardvarkSpiPolarity
AA_SPI_POL_RISING_FALLING = 0
AA_SPI_POL_FALLING_RISING = 1
# enum AardvarkSpiPhase
AA_SPI_PHASE_SAMPLE_SETUP = 0
AA_SPI_PHASE_SETUP_SAMPLE = 1
# enum AardvarkSpiBitorder
AA_SPI_BITORDER_MSB = 0
AA_SPI_BITORDER_LSB = 1
# Configure the SPI master or slave interface
def aa_spi_configure (aardvark, polarity, phase, bitorder):
"""usage: int return = aa_spi_configure(Aardvark aardvark, AardvarkSpiPolarity polarity, AardvarkSpiPhase phase, AardvarkSpiBitorder bitorder)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_spi_configure(aardvark, polarity, phase, bitorder)
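# --- Illustrative usage sketch; not part of the generated binding. ---
# Configures the clocking scheme described above: SCK idles low
# (AA_SPI_POL_RISING_FALLING), data is sampled on the leading edge
# (AA_SPI_PHASE_SAMPLE_SETUP) and shifted MSB first -- the combination
# commonly referred to as SPI mode 0.  The 1 MHz bitrate is an example value.
def _example_spi_mode0_setup(aardvark, bitrate_khz=1000):
    aa_spi_configure(aardvark,
                     AA_SPI_POL_RISING_FALLING,
                     AA_SPI_PHASE_SAMPLE_SETUP,
                     AA_SPI_BITORDER_MSB)
    return aa_spi_bitrate(aardvark, bitrate_khz)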
# Write a stream of bytes to the downstream SPI slave device.
def aa_spi_write (aardvark, data_out, data_in):
"""usage: (int return, u08[] data_in) = aa_spi_write(Aardvark aardvark, u08[] data_out, u08[] data_in)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data_out pre-processing
(data_out, out_num_bytes) = isinstance(data_out, ArrayType) and (data_out, len(data_out)) or (data_out[0], min(len(data_out[0]), int(data_out[1])))
if data_out.typecode != 'B':
raise TypeError("type for 'data_out' must be array('B')")
# data_in pre-processing
__data_in = isinstance(data_in, int)
if __data_in:
(data_in, in_num_bytes) = (array_u08(data_in), data_in)
else:
(data_in, in_num_bytes) = isinstance(data_in, ArrayType) and (data_in, len(data_in)) or (data_in[0], min(len(data_in[0]), int(data_in[1])))
if data_in.typecode != 'B':
raise TypeError("type for 'data_in' must be array('B')")
# Call API function
(_ret_) = api.py_aa_spi_write(aardvark, out_num_bytes, data_out, in_num_bytes, data_in)
# data_in post-processing
if __data_in: del data_in[max(0, min(_ret_, len(data_in))):]
return (_ret_, data_in)
# Enable/Disable the Aardvark as an SPI slave device
def aa_spi_slave_enable (aardvark):
"""usage: int return = aa_spi_slave_enable(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_spi_slave_enable(aardvark)
def aa_spi_slave_disable (aardvark):
"""usage: int return = aa_spi_slave_disable(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_spi_slave_disable(aardvark)
# Set the slave response in the event the Aardvark is put
# into slave mode and contacted by a Master.
def aa_spi_slave_set_response (aardvark, data_out):
"""usage: int return = aa_spi_slave_set_response(Aardvark aardvark, u08[] data_out)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
as the length argument to the API funtion (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data_out pre-processing
(data_out, num_bytes) = isinstance(data_out, ArrayType) and (data_out, len(data_out)) or (data_out[0], min(len(data_out[0]), int(data_out[1])))
if data_out.typecode != 'B':
raise TypeError("type for 'data_out' must be array('B')")
# Call API function
return api.py_aa_spi_slave_set_response(aardvark, num_bytes, data_out)
# Read the bytes from an SPI slave reception
def aa_spi_slave_read (aardvark, data_in):
"""usage: (int return, u08[] data_in) = aa_spi_slave_read(Aardvark aardvark, u08[] data_in)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data_in pre-processing
__data_in = isinstance(data_in, int)
if __data_in:
(data_in, num_bytes) = (array_u08(data_in), data_in)
else:
(data_in, num_bytes) = isinstance(data_in, ArrayType) and (data_in, len(data_in)) or (data_in[0], min(len(data_in[0]), int(data_in[1])))
if data_in.typecode != 'B':
raise TypeError("type for 'data_in' must be array('B')")
# Call API function
(_ret_) = api.py_aa_spi_slave_read(aardvark, num_bytes, data_in)
# data_in post-processing
if __data_in: del data_in[max(0, min(_ret_, len(data_in))):]
return (_ret_, data_in)
# Change the output polarity on the SS line.
#
# Note: When configured as an SPI slave, the Aardvark will
# always be setup with SS as active low. Hence this function
# only affects the SPI master functions on the Aardvark.
# enum AardvarkSpiSSPolarity
AA_SPI_SS_ACTIVE_LOW = 0
AA_SPI_SS_ACTIVE_HIGH = 1
def aa_spi_master_ss_polarity (aardvark, polarity):
"""usage: int return = aa_spi_master_ss_polarity(Aardvark aardvark, AardvarkSpiSSPolarity polarity)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_spi_master_ss_polarity(aardvark, polarity)
#==========================================================================
# GPIO API
#==========================================================================
# The following enumerated type maps the named lines on the
# Aardvark I2C/SPI line to bit positions in the GPIO API.
# All GPIO API functions will index these lines through an
# 8-bit masked value. Thus, each bit position in the mask
# can be referred back to its corresponding line through the
# enumerated type.
# enum AardvarkGpioBits
AA_GPIO_SCL = 0x01
AA_GPIO_SDA = 0x02
AA_GPIO_MISO = 0x04
AA_GPIO_SCK = 0x08
AA_GPIO_MOSI = 0x10
AA_GPIO_SS = 0x20
# Configure the GPIO, specifying the direction of each bit.
#
# A call to this function will not change the value of the pullup
# mask in the Aardvark. This is illustrated by the following
# example:
# (1) Direction mask is first set to 0x00
# (2) Pullup is set to 0x01
# (3) Direction mask is set to 0x01
# (4) Direction mask is later set back to 0x00.
#
# The pullup will be active after (4).
#
# On Aardvark power-up, the default value of the direction
# mask is 0x00.
AA_GPIO_DIR_INPUT = 0
AA_GPIO_DIR_OUTPUT = 1
def aa_gpio_direction (aardvark, direction_mask):
"""usage: int return = aa_gpio_direction(Aardvark aardvark, u08 direction_mask)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_gpio_direction(aardvark, direction_mask)
# Enable an internal pullup on any of the GPIO input lines.
#
# Note: If a line is configured as an output, the pullup bit
# for that line will be ignored, though that pullup bit will
# be cached in case the line is later configured as an input.
#
# By default the pullup mask is 0x00.
AA_GPIO_PULLUP_OFF = 0
AA_GPIO_PULLUP_ON = 1
def aa_gpio_pullup (aardvark, pullup_mask):
"""usage: int return = aa_gpio_pullup(Aardvark aardvark, u08 pullup_mask)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_gpio_pullup(aardvark, pullup_mask)
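# --- Illustrative usage sketch; not part of the generated binding. ---
# Reproduces the four-step sequence from the aa_gpio_direction() comment,
# showing that the pullup mask is cached independently of the direction mask.
def _example_gpio_pullup_caching(aardvark):
    aa_gpio_direction(aardvark, 0x00)         # (1) all lines are inputs
    aa_gpio_pullup(aardvark, AA_GPIO_SCL)     # (2) pullup 0x01 on the SCL line
    aa_gpio_direction(aardvark, AA_GPIO_SCL)  # (3) SCL becomes an output
    aa_gpio_direction(aardvark, 0x00)         # (4) SCL is an input again
    return aa_gpio_get(aardvark)              # the SCL pullup is still active here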
# Read the current digital values on the GPIO input lines.
#
# The bits will be ordered as described by AA_GPIO_BITS. If a
# line is configured as an output, its corresponding bit
# position in the mask will be undefined.
def aa_gpio_get (aardvark):
"""usage: int return = aa_gpio_get(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_gpio_get(aardvark)
# Set the outputs on the GPIO lines.
#
# Note: If a line is configured as an input, it will not be
# affected by this call, but the output value for that line
# will be cached in the event that the line is later
# configured as an output.
def aa_gpio_set (aardvark, value):
"""usage: int return = aa_gpio_set(Aardvark aardvark, u08 value)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_gpio_set(aardvark, value)
# Block until there is a change on the GPIO input lines.
# Pins configured as outputs will be ignored.
#
# The function will return either when a change has occurred or
# the timeout expires. The timeout, specified in milliseconds, has
# a precision of ~16 ms. The maximum allowable timeout is
# approximately 4 seconds. If the timeout expires, this function
# will return the current state of the GPIO lines.
#
# This function will return immediately with the current value
# of the GPIO lines for the first invocation after any of the
# following functions are called: aa_configure,
# aa_gpio_direction, or aa_gpio_pullup.
#
# If the function aa_gpio_get is called before calling
# aa_gpio_change, aa_gpio_change will only register any changes
# from the value last returned by aa_gpio_get.
def aa_gpio_change (aardvark, timeout):
"""usage: int return = aa_gpio_change(Aardvark aardvark, u16 timeout)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_gpio_change(aardvark, timeout)
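# --- Illustrative usage sketch; not part of the generated binding. ---
# Waits up to two seconds for any GPIO input to change.  Reading the current
# value first means aa_gpio_change() reports changes relative to that
# snapshot, as described in the comment above.
def _example_wait_for_gpio_change(aardvark, timeout_ms=2000):
    before = aa_gpio_get(aardvark)
    after = aa_gpio_change(aardvark, timeout_ms)
    return (after, before ^ after)    # (new state, bits that changed)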
| 41.587903
| 196
| 0.684656
|
a1e1820d5bf5fe5771af05fabe5d2ae71d63b110
| 1,963
|
py
|
Python
|
tools/perf/core/tbmv3/run_tbmv3_metric.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/perf/core/tbmv3/run_tbmv3_metric.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/perf/core/tbmv3/run_tbmv3_metric.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2021-01-05T23:43:46.000Z
|
2021-01-07T23:36:34.000Z
|
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import json
import os
import sys
# TODO(crbug.com/1012687): Adding tools/perf to path. We can remove this when
# we have a wrapper script under tools/perf that sets up import paths more
# nicely.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from core import path_util
path_util.AddPyUtilsToPath()
path_util.AddTracingToPath()
from core.tbmv3 import trace_processor
_CHROMIUM_SRC_PATH = os.path.join(
os.path.dirname(__file__), '..', '..', '..', '..')
def _WriteHistogramSetToFile(histograms, outfile):
with open(outfile, 'w') as f:
json.dump(histograms.AsDicts(), f, indent=2, sort_keys=True,
separators=(',', ': '))
f.write("\n")
def Main(cli_args):
parser = argparse.ArgumentParser(
description='[Experimental] Runs TBMv3 metrics on local traces and '
'produces histogram json.')
parser.add_argument('--trace', required=True,
help='Trace file you want to compute metric on')
parser.add_argument('--metric', required=True,
help=('Name of the metric you want to run'))
parser.add_argument(
'--trace-processor-path',
help='Path to trace processor shell. '
'Default: Binary downloaded from cloud storage.')
parser.add_argument('--outfile', default='results.json',
help='Path to output file. Default: %(default)s')
args = parser.parse_args(cli_args)
histograms = trace_processor.RunMetric(args.trace_processor_path,
args.trace, args.metric)
_WriteHistogramSetToFile(histograms, args.outfile)
print('JSON result created in file://%s' % (args.outfile))
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
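# Illustrative invocation (hypothetical trace path and metric name):
#   vpython tools/perf/core/tbmv3/run_tbmv3_metric.py \
#       --trace /tmp/example_trace.pb --metric dummy_metric \
#       --outfile /tmp/results.json
# The computed histograms are written as JSON to the --outfile path.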
| 32.716667
| 77
| 0.679063
|
1a4bd3f29af7565c6ef504fb9d0385081f6f618f
| 2,369
|
py
|
Python
|
Quantum Phase Estimation/Mutation Testing/QPE Mutation Testing/Remove_mutant_3.py
|
Lilgabz/Quantum-Algorithm-Implementations
|
2bb5df522d76e94b300275dfefff2869ff31bc2c
|
[
"MIT"
] | 1
|
2022-03-20T17:20:09.000Z
|
2022-03-20T17:20:09.000Z
|
Quantum Phase Estimation/Mutation Testing/QPE Mutation Testing/Remove_mutant_3.py
|
Lilgabz/Quantum-Algorithm-Implementations
|
2bb5df522d76e94b300275dfefff2869ff31bc2c
|
[
"MIT"
] | null | null | null |
Quantum Phase Estimation/Mutation Testing/QPE Mutation Testing/Remove_mutant_3.py
|
Lilgabz/Quantum-Algorithm-Implementations
|
2bb5df522d76e94b300275dfefff2869ff31bc2c
|
[
"MIT"
] | 2
|
2021-12-30T22:23:20.000Z
|
2022-03-20T17:20:22.000Z
|
import warnings
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, Aer, transpile, assemble
from qiskit.tools.monitor import job_monitor
from qiskit.circuit.library import QFT
from qiskit.visualization import plot_histogram, plot_bloch_multivector
warnings.filterwarnings("ignore", category=DeprecationWarning)
import numpy as np
pi = np.pi
def qft_dagger(qc, n):
"""n-qubit QFTdagger the first n qubits in circ"""
# Don't forget the Swaps!
for qubit in range(n//2):
qc.swap(qubit, n-qubit-1)
for j in range(n):
for m in range(j):
qc.cp(-pi/float(2**(j-m)), m, j)
qc.h(j)
def generalised_qpe(amt_estimation_qubits, angle, shots=10000):
go = True
while go:
# Create and set up circuit
qpe3 = QuantumCircuit(amt_estimation_qubits+1, amt_estimation_qubits)
# Apply H-Gates to counting qubits:
for qubit in range(amt_estimation_qubits):
qpe3.h(qubit)
# Prepare our eigenstate |psi>:
repetitions = 1
for counting_qubit in range(amt_estimation_qubits):
for i in range(repetitions):
qpe3.cp(angle, counting_qubit, amt_estimation_qubits);
repetitions *= 2
# Do the inverse QFT:
qft_dagger(qpe3, amt_estimation_qubits)
# Measure of course!
qpe3.barrier()
for n in range(amt_estimation_qubits):
qpe3.measure(n,n)
aer_sim = Aer.get_backend('aer_simulator')
t_qpe3 = transpile(qpe3, aer_sim)
qobj = assemble(t_qpe3, shots=shots)
results = aer_sim.run(qobj).result()
answer = results.get_counts()
answer2 = {int(k,2)/2**amt_estimation_qubits: v for k, v in answer.items()}
print(answer2)
        # Retry until the simulator produces a unique most-frequent outcome;
        # Counts.most_frequent() raises when several outcomes tie for the
        # highest count, in which case the experiment is simply rerun.
        try:
            answer.most_frequent()
            go = False
        except Exception:
            pass
#print("Most frequent '" + answer.most_frequent() + "'")
print("Approx rotation angle by Z from the unitary in degrees '" + str(360 * int(answer.most_frequent(), 2)/2**amt_estimation_qubits) + "'")
#print("Phase Calculation " + answer.most_frequent())
##return(plot_histogram(answer))
##comment out the return if you want to see the histogram
return((int(answer.most_frequent(), 2)/2**amt_estimation_qubits))
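# Illustrative usage sketch (assumes qiskit and the Aer simulator are
# installed); the guard keeps the simulation from running on import.
if __name__ == "__main__":
    print(generalised_qpe(3, pi / 4, shots=2048))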
| 32.452055
| 144
| 0.642043
|
8f810052c5176fa58b6f589a2421e9cfd1bfe1a3
| 26,954
|
py
|
Python
|
heat/engine/clients/os/nova.py
|
maestro-hybrid-cloud/heat
|
91a4bb3170bd81b1c67a896706851e55709c9b5a
|
[
"Apache-2.0"
] | null | null | null |
heat/engine/clients/os/nova.py
|
maestro-hybrid-cloud/heat
|
91a4bb3170bd81b1c67a896706851e55709c9b5a
|
[
"Apache-2.0"
] | null | null | null |
heat/engine/clients/os/nova.py
|
maestro-hybrid-cloud/heat
|
91a4bb3170bd81b1c67a896706851e55709c9b5a
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import email
from email.mime import multipart
from email.mime import text
import logging
import os
import pkgutil
import string
from novaclient import client as nc
from novaclient import exceptions
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import six
from six.moves.urllib import parse as urlparse
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine.clients import client_plugin
from heat.engine import constraints
LOG = logging.getLogger(__name__)
NOVACLIENT_VERSION = "2"
class NovaClientPlugin(client_plugin.ClientPlugin):
deferred_server_statuses = ['BUILD',
'HARD_REBOOT',
'PASSWORD',
'REBOOT',
'RESCUE',
'RESIZE',
'REVERT_RESIZE',
'SHUTOFF',
'SUSPENDED',
'VERIFY_RESIZE']
exceptions_module = exceptions
service_types = [COMPUTE] = ['compute']
EXTENSIONS = (
OS_INTERFACE_EXTENSION
) = (
"OSInterface"
)
def _create(self):
endpoint_type = self._get_client_option('nova', 'endpoint_type')
management_url = self.url_for(service_type=self.COMPUTE,
endpoint_type=endpoint_type)
extensions = nc.discover_extensions(NOVACLIENT_VERSION)
args = {
'project_id': self.context.tenant_id,
'auth_url': self.context.auth_url,
'auth_token': self.auth_token,
'service_type': self.COMPUTE,
'username': None,
'api_key': None,
'extensions': extensions,
'endpoint_type': endpoint_type,
'http_log_debug': self._get_client_option('nova',
'http_log_debug'),
'cacert': self._get_client_option('nova', 'ca_file'),
'insecure': self._get_client_option('nova', 'insecure')
}
client = nc.Client(NOVACLIENT_VERSION, **args)
client.client.set_management_url(management_url)
return client
def is_not_found(self, ex):
return isinstance(ex, exceptions.NotFound)
def is_over_limit(self, ex):
return isinstance(ex, exceptions.OverLimit)
def is_bad_request(self, ex):
return isinstance(ex, exceptions.BadRequest)
def is_conflict(self, ex):
return isinstance(ex, exceptions.Conflict)
def is_unprocessable_entity(self, ex):
http_status = (getattr(ex, 'http_status', None) or
getattr(ex, 'code', None))
return (isinstance(ex, exceptions.ClientException) and
http_status == 422)
def get_server(self, server):
"""Return fresh server object.
Substitutes Nova's NotFound for Heat's EntityNotFound,
to be returned to user as HTTP error.
"""
try:
return self.client().servers.get(server)
except exceptions.NotFound as ex:
LOG.warn(_LW('Server (%(server)s) not found: %(ex)s'),
{'server': server, 'ex': ex})
raise exception.EntityNotFound(entity='Server', name=server)
def fetch_server(self, server_id):
"""Fetch fresh server object from Nova.
Log warnings and return None for non-critical API errors.
Use this method in various ``check_*_complete`` resource methods,
where intermittent errors can be tolerated.
"""
server = None
try:
server = self.client().servers.get(server_id)
except exceptions.OverLimit as exc:
LOG.warn(_LW("Received an OverLimit response when "
"fetching server (%(id)s) : %(exception)s"),
{'id': server_id,
'exception': exc})
except exceptions.ClientException as exc:
if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
(500, 503))):
LOG.warn(_LW("Received the following exception when "
"fetching server (%(id)s) : %(exception)s"),
{'id': server_id,
'exception': exc})
else:
raise
return server
def refresh_server(self, server):
"""Refresh server's attributes.
Also log warnings for non-critical API errors.
"""
try:
server.get()
except exceptions.OverLimit as exc:
LOG.warn(_LW("Server %(name)s (%(id)s) received an OverLimit "
"response during server.get(): %(exception)s"),
{'name': server.name,
'id': server.id,
'exception': exc})
except exceptions.ClientException as exc:
if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
(500, 503))):
LOG.warn(_LW('Server "%(name)s" (%(id)s) received the '
'following exception during server.get(): '
'%(exception)s'),
{'name': server.name,
'id': server.id,
'exception': exc})
else:
raise
def get_ip(self, server, net_type, ip_version):
"""Return the server's IP of the given type and version."""
if net_type in server.addresses:
for ip in server.addresses[net_type]:
if ip['version'] == ip_version:
return ip['addr']
def get_status(self, server):
"""Return the server's status.
:param server: server object
:returns: status as a string
"""
# Some clouds append extra (STATUS) strings to the status, strip it
return server.status.split('(')[0]
def _check_active(self, server, res_name='Server'):
"""Check server status.
Accepts both server IDs and server objects.
Returns True if server is ACTIVE,
        raises errors when the server is in an ERROR state or in a status unknown to Heat,
returns False otherwise.
:param res_name: name of the resource to use in the exception message
"""
# not checking with is_uuid_like as most tests use strings e.g. '1234'
if isinstance(server, six.string_types):
server = self.fetch_server(server)
if server is None:
return False
else:
status = self.get_status(server)
else:
status = self.get_status(server)
if status != 'ACTIVE':
self.refresh_server(server)
status = self.get_status(server)
if status in self.deferred_server_statuses:
return False
elif status == 'ACTIVE':
return True
elif status == 'ERROR':
fault = getattr(server, 'fault', {})
raise exception.ResourceInError(
resource_status=status,
status_reason=_("Message: %(message)s, Code: %(code)s") % {
'message': fault.get('message', _('Unknown')),
'code': fault.get('code', _('Unknown'))
})
else:
raise exception.ResourceUnknownStatus(
resource_status=server.status,
result=_('%s is not active') % res_name)
def get_flavor_id(self, flavor):
"""Get the id for the specified flavor name.
        If the specified value is a flavor id, just return it.
:param flavor: the name of the flavor to find
:returns: the id of :flavor:
:raises: exception.FlavorMissing
"""
flavor_id = None
flavor_list = self.client().flavors.list()
for o in flavor_list:
if o.name == flavor:
flavor_id = o.id
break
if o.id == flavor:
flavor_id = o.id
break
if flavor_id is None:
raise exception.FlavorMissing(flavor_id=flavor)
return flavor_id
def get_keypair(self, key_name):
"""Get the public key specified by :key_name:
:param key_name: the name of the key to look for
:returns: the keypair (name, public_key) for :key_name:
:raises: exception.UserKeyPairMissing
"""
try:
return self.client().keypairs.get(key_name)
except exceptions.NotFound:
raise exception.UserKeyPairMissing(key_name=key_name)
def build_userdata(self, metadata, userdata=None, instance_user=None,
user_data_format='HEAT_CFNTOOLS'):
"""Build multipart data blob for CloudInit.
Data blob includes user-supplied Metadata, user data, and the required
Heat in-instance configuration.
:param resource: the resource implementation
:type resource: heat.engine.Resource
:param userdata: user data string
:type userdata: str or None
:param instance_user: the user to create on the server
:type instance_user: string
:param user_data_format: Format of user data to return
:type user_data_format: string
:returns: multipart mime as a string
"""
if user_data_format == 'RAW':
return userdata
is_cfntools = user_data_format == 'HEAT_CFNTOOLS'
is_software_config = user_data_format == 'SOFTWARE_CONFIG'
def make_subpart(content, filename, subtype=None):
if subtype is None:
subtype = os.path.splitext(filename)[0]
if content is None:
content = ''
msg = text.MIMEText(content, _subtype=subtype)
msg.add_header('Content-Disposition', 'attachment',
filename=filename)
return msg
def read_cloudinit_file(fn):
return pkgutil.get_data(
'heat', 'cloudinit/%s' % fn).decode('utf-8')
if instance_user:
config_custom_user = 'user: %s' % instance_user
# FIXME(shadower): compatibility workaround for cloud-init 0.6.3.
# We can drop this once we stop supporting 0.6.3 (which ships
# with Ubuntu 12.04 LTS).
#
# See bug https://bugs.launchpad.net/heat/+bug/1257410
boothook_custom_user = r"""useradd -m %s
echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
""" % (instance_user, instance_user)
else:
config_custom_user = ''
boothook_custom_user = ''
cloudinit_config = string.Template(
read_cloudinit_file('config')).safe_substitute(
add_custom_user=config_custom_user)
cloudinit_boothook = string.Template(
read_cloudinit_file('boothook.sh')).safe_substitute(
add_custom_user=boothook_custom_user)
attachments = [(cloudinit_config, 'cloud-config'),
(cloudinit_boothook, 'boothook.sh', 'cloud-boothook'),
(read_cloudinit_file('part_handler.py'),
'part-handler.py')]
if is_cfntools:
attachments.append((userdata, 'cfn-userdata', 'x-cfninitdata'))
elif is_software_config:
# attempt to parse userdata as a multipart message, and if it
# is, add each part as an attachment
userdata_parts = None
try:
userdata_parts = email.message_from_string(userdata)
except Exception:
pass
if userdata_parts and userdata_parts.is_multipart():
for part in userdata_parts.get_payload():
attachments.append((part.get_payload(),
part.get_filename(),
part.get_content_subtype()))
else:
attachments.append((userdata, 'userdata', 'x-shellscript'))
if is_cfntools:
attachments.append((read_cloudinit_file('loguserdata.py'),
'loguserdata.py', 'x-shellscript'))
if metadata:
attachments.append((jsonutils.dumps(metadata),
'cfn-init-data', 'x-cfninitdata'))
attachments.append((cfg.CONF.heat_watch_server_url,
'cfn-watch-server', 'x-cfninitdata'))
if is_cfntools:
attachments.append((cfg.CONF.heat_metadata_server_url,
'cfn-metadata-server', 'x-cfninitdata'))
# Create a boto config which the cfntools on the host use to know
# where the cfn and cw API's are to be accessed
cfn_url = urlparse.urlparse(cfg.CONF.heat_metadata_server_url)
cw_url = urlparse.urlparse(cfg.CONF.heat_watch_server_url)
is_secure = cfg.CONF.instance_connection_is_secure
vcerts = cfg.CONF.instance_connection_https_validate_certificates
boto_cfg = "\n".join(["[Boto]",
"debug = 0",
"is_secure = %s" % is_secure,
"https_validate_certificates = %s" % vcerts,
"cfn_region_name = heat",
"cfn_region_endpoint = %s" %
cfn_url.hostname,
"cloudwatch_region_name = heat",
"cloudwatch_region_endpoint = %s" %
cw_url.hostname])
attachments.append((boto_cfg,
'cfn-boto-cfg', 'x-cfninitdata'))
subparts = [make_subpart(*args) for args in attachments]
mime_blob = multipart.MIMEMultipart(_subparts=subparts)
return mime_blob.as_string()
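    # Illustrative usage sketch (object names are assumptions, not Heat API):
    #   plugin = self.client_plugin('nova')
    #   blob = plugin.build_userdata({'foo': 'bar'},
    #                                userdata='#!/bin/sh\necho hello',
    #                                user_data_format='HEAT_CFNTOOLS')
    # The result is a multipart MIME string combining the cloud-init config,
    # boothook, part handler and the caller-supplied user data.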
def check_delete_server_complete(self, server_id):
"""Wait for server to disappear from Nova."""
try:
server = self.fetch_server(server_id)
except Exception as exc:
self.ignore_not_found(exc)
return True
if not server:
return False
task_state_in_nova = getattr(server, 'OS-EXT-STS:task_state', None)
# the status of server won't change until the delete task has done
if task_state_in_nova == 'deleting':
return False
status = self.get_status(server)
if status in ("DELETED", "SOFT_DELETED"):
return True
if status == 'ERROR':
fault = getattr(server, 'fault', {})
message = fault.get('message', 'Unknown')
code = fault.get('code')
errmsg = _("Server %(name)s delete failed: (%(code)s) "
"%(message)s") % dict(name=server.name,
code=code,
message=message)
raise exception.ResourceInError(resource_status=status,
status_reason=errmsg)
return False
def rename(self, server, name):
"""Update the name for a server."""
server.update(name)
def resize(self, server_id, flavor_id):
"""Resize the server."""
server = self.fetch_server(server_id)
if server:
server.resize(flavor_id)
return True
else:
return False
def check_resize(self, server_id, flavor_id, flavor):
"""Verify that a resizing server is properly resized.
If that's the case, confirm the resize, if not raise an error.
"""
server = self.fetch_server(server_id)
# resize operation is asynchronous so the server resize may not start
# when checking server status (the server may stay ACTIVE instead
# of RESIZE).
if not server or server.status in ('RESIZE', 'ACTIVE'):
return False
if server.status == 'VERIFY_RESIZE':
return True
else:
raise exception.Error(
_("Resizing to '%(flavor)s' failed, status '%(status)s'") %
dict(flavor=flavor, status=server.status))
def verify_resize(self, server_id):
server = self.fetch_server(server_id)
if not server:
return False
status = self.get_status(server)
if status == 'VERIFY_RESIZE':
server.confirm_resize()
return True
else:
msg = _("Could not confirm resize of server %s") % server_id
raise exception.ResourceUnknownStatus(
result=msg, resource_status=status)
def check_verify_resize(self, server_id):
server = self.fetch_server(server_id)
if not server:
return False
status = self.get_status(server)
if status == 'ACTIVE':
return True
if status == 'VERIFY_RESIZE':
return False
else:
msg = _("Confirm resize for server %s failed") % server_id
raise exception.ResourceUnknownStatus(
result=msg, resource_status=status)
def rebuild(self, server_id, image_id, password=None,
preserve_ephemeral=False):
"""Rebuild the server and call check_rebuild to verify."""
server = self.fetch_server(server_id)
if server:
server.rebuild(image_id, password=password,
preserve_ephemeral=preserve_ephemeral)
return True
else:
return False
def check_rebuild(self, server_id):
"""Verify that a rebuilding server is rebuilt.
Raise error if it ends up in an ERROR state.
"""
server = self.fetch_server(server_id)
if server is None or server.status == 'REBUILD':
return False
if server.status == 'ERROR':
raise exception.Error(
_("Rebuilding server failed, status '%s'") % server.status)
else:
return True
def meta_serialize(self, metadata):
"""Serialize non-string metadata values before sending them to Nova."""
if not isinstance(metadata, collections.Mapping):
raise exception.StackValidationFailed(message=_(
"nova server metadata needs to be a Map."))
return dict((key, (value if isinstance(value,
six.string_types)
else jsonutils.dumps(value))
) for (key, value) in metadata.items())
def meta_update(self, server, metadata):
"""Delete/Add the metadata in nova as needed."""
metadata = self.meta_serialize(metadata)
current_md = server.metadata
to_del = sorted([key for key in six.iterkeys(current_md)
if key not in metadata])
client = self.client()
if len(to_del) > 0:
client.servers.delete_meta(server, to_del)
client.servers.set_meta(server, metadata)
def server_to_ipaddress(self, server):
"""Return the server's IP address, fetching it from Nova."""
try:
server = self.client().servers.get(server)
except exceptions.NotFound as ex:
LOG.warn(_LW('Instance (%(server)s) not found: %(ex)s'),
{'server': server, 'ex': ex})
else:
for n in sorted(server.networks, reverse=True):
if len(server.networks[n]) > 0:
return server.networks[n][0]
def absolute_limits(self):
"""Return the absolute limits as a dictionary."""
limits = self.client().limits.get()
return dict([(limit.name, limit.value)
for limit in list(limits.absolute)])
def get_console_urls(self, server):
"""Return dict-like structure of server's console urls.
The actual console url is lazily resolved on access.
"""
class ConsoleUrls(collections.Mapping):
def __init__(self, server):
self.console_methods = {
'novnc': server.get_vnc_console,
'xvpvnc': server.get_vnc_console,
'spice-html5': server.get_spice_console,
'rdp-html5': server.get_rdp_console,
'serial': server.get_serial_console
}
def __getitem__(self, key):
try:
url = self.console_methods[key](key)['console']['url']
except exceptions.BadRequest as e:
unavailable = 'Unavailable console type'
if unavailable in e.message:
url = e.message
else:
raise
return url
def __len__(self):
return len(self.console_methods)
def __iter__(self):
return (key for key in self.console_methods)
return ConsoleUrls(server)
def get_net_id_by_label(self, label):
try:
net_id = self.client().networks.find(label=label).id
except exceptions.NotFound as ex:
LOG.debug('Nova network (%(net)s) not found: %(ex)s',
{'net': label, 'ex': ex})
raise exception.NovaNetworkNotFound(network=label)
except exceptions.NoUniqueMatch as exc:
LOG.debug('Nova network (%(net)s) is not unique matched: %(exc)s',
{'net': label, 'exc': exc})
raise exception.PhysicalResourceNameAmbiguity(name=label)
return net_id
def get_nova_network_id(self, net_identifier):
if uuidutils.is_uuid_like(net_identifier):
try:
net_id = self.client().networks.get(net_identifier).id
except exceptions.NotFound:
net_id = self.get_net_id_by_label(net_identifier)
else:
net_id = self.get_net_id_by_label(net_identifier)
return net_id
def attach_volume(self, server_id, volume_id, device):
try:
va = self.client().volumes.create_server_volume(
server_id=server_id,
volume_id=volume_id,
device=device)
except Exception as ex:
if self.is_client_exception(ex):
raise exception.Error(_(
"Failed to attach volume %(vol)s to server %(srv)s "
"- %(err)s") % {'vol': volume_id,
'srv': server_id,
'err': ex})
else:
raise
return va.id
def detach_volume(self, server_id, attach_id):
# detach the volume using volume_attachment
try:
self.client().volumes.delete_server_volume(server_id, attach_id)
except Exception as ex:
if not (self.is_not_found(ex)
or self.is_bad_request(ex)):
raise exception.Error(
_("Could not detach attachment %(att)s "
"from server %(srv)s.") % {'srv': server_id,
'att': attach_id})
def check_detach_volume_complete(self, server_id, attach_id):
"""Check that nova server lost attachment.
This check is needed for immediate reattachment when updating:
there might be some time between cinder marking volume as 'available'
and nova removing attachment from its own objects, so we
check that nova already knows that the volume is detached.
"""
try:
self.client().volumes.get_server_volume(server_id, attach_id)
except Exception as ex:
self.ignore_not_found(ex)
LOG.info(_LI("Volume %(vol)s is detached from server %(srv)s"),
{'vol': attach_id, 'srv': server_id})
return True
else:
LOG.debug("Server %(srv)s still has attachment %(att)s." % {
'att': attach_id, 'srv': server_id})
return False
def interface_detach(self, server_id, port_id):
server = self.fetch_server(server_id)
if server:
server.interface_detach(port_id)
return True
else:
return False
def interface_attach(self, server_id, port_id=None, net_id=None, fip=None):
server = self.fetch_server(server_id)
if server:
server.interface_attach(port_id, net_id, fip)
return True
else:
return False
def _has_extension(self, extension_name):
"""Check if extension is present."""
extensions = self.client().list_extensions.show_all()
return extension_name in [extension.name for extension in extensions]
class ServerConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.EntityNotFound,)
def validate_with_client(self, client, server):
client.client_plugin('nova').get_server(server)
class KeypairConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.UserKeyPairMissing,)
def validate_with_client(self, client, key_name):
if not key_name:
# Don't validate empty key, which can happen when you
# use a KeyPair resource
return True
client.client_plugin('nova').get_keypair(key_name)
class FlavorConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.FlavorMissing,)
def validate_with_client(self, client, flavor):
client.client_plugin('nova').get_flavor_id(flavor)
class NetworkConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.NovaNetworkNotFound,
exception.PhysicalResourceNameAmbiguity)
def validate_with_client(self, client, network):
client.client_plugin('nova').get_nova_network_id(network)
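The check_detach_volume_complete() method above only answers whether nova still reports the attachment, so a caller has to poll it. Below is a minimal, hypothetical poll-loop sketch in plain Python; poll_until_true and check_fn are illustrative names, and check_fn stands in for a bound call such as plugin.check_detach_volume_complete(server_id, attach_id).

# Hypothetical sketch of the poll-until-detached pattern described above;
# no nova/cinder objects are involved, check_fn is any zero-argument callable.
import time

def poll_until_true(check_fn, timeout=60.0, interval=2.0):
    # Call check_fn() every `interval` seconds until it returns True or the
    # timeout expires; returns the last result observed.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if check_fn():
            return True
        time.sleep(interval)
    return False

# Self-contained demo with a stand-in check that succeeds on the third call.
_calls = {"n": 0}
def _fake_check():
    _calls["n"] += 1
    return _calls["n"] >= 3

assert poll_until_true(_fake_check, timeout=5.0, interval=0.01) is True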
| 38.17847
| 79
| 0.568487
|
aefc58ee85ce3ea427bb677c84c2f10c0a814f12
| 904
|
py
|
Python
|
Example/Voltage_curve_with_electric_system_control.py
|
sourabh1994/pyInstrument
|
a80786acb8218aba03368cffc475b4fc1bd5f459
|
[
"Unlicense"
] | 2
|
2022-01-25T04:30:24.000Z
|
2022-02-23T11:24:40.000Z
|
Example/Voltage_curve_with_electric_system_control.py
|
sourabh1994/pyInstrument
|
a80786acb8218aba03368cffc475b4fc1bd5f459
|
[
"Unlicense"
] | null | null | null |
Example/Voltage_curve_with_electric_system_control.py
|
sourabh1994/pyInstrument
|
a80786acb8218aba03368cffc475b4fc1bd5f459
|
[
"Unlicense"
] | 3
|
2020-05-21T09:21:31.000Z
|
2021-01-17T08:32:49.000Z
|
"""
n
"""
import pyvisa as visa
from pyinstrument import PSupply
# instrument address
PSN5744USB = "USB0::2391::38151::US15J0384P::0::INSTR"
PSN5744Eth = "TCPIP0::169.254.57.0::inst0::INSTR"
PSTektronix = "USB0::1689::913::081001126668003045::0::INSTR"
rm = visa.ResourceManager()
rm.list_resources()
PS = rm.open_resource(rm.list_resources()[0]) # choose the proper address for your instrument
print('Power supply detected=> ' + PS.query('*IDN?'))  # check that communication is established
ps = PSupply(PS)
ps.on()
ps.setCurr(3) # set current to 3 amp
print('program started')
ps.setVolt(0)
ps.delay(1)
ps.setVolt(11.8) # umin
ps.delay(.4) # tr = 400ms
ps.setVolt(16) # Umax 16V
ps.delay(2) # 2 sec
ps.setVolt(11.8) # tf = 400ms
ps.delay(.4)
ps.setVolt(0)
ps.delay(1)
print(ps.measureVolt())
print(ps.measureCurr())
ps.delay(1)  # brief pause before switching the supply off
ps.off()
PS.close()
print("process complete")
| 21.023256
| 96
| 0.693584
|
8ec1d2148c0d22eb208e1814e09f60a8628cc87e
| 2,070
|
py
|
Python
|
lims/shared/serializers.py
|
sqilz/LIMS-Backend
|
b64e1fa512f89e4492803d44c6b8c35e4d4724cc
|
[
"MIT"
] | 12
|
2017-03-01T10:39:36.000Z
|
2022-01-04T06:17:19.000Z
|
lims/shared/serializers.py
|
sqilz/LIMS-Backend
|
b64e1fa512f89e4492803d44c6b8c35e4d4724cc
|
[
"MIT"
] | 29
|
2017-04-25T14:05:08.000Z
|
2021-06-21T14:41:53.000Z
|
lims/shared/serializers.py
|
sqilz/LIMS-Backend
|
b64e1fa512f89e4492803d44c6b8c35e4d4724cc
|
[
"MIT"
] | 4
|
2017-10-11T16:22:53.000Z
|
2021-02-23T15:45:21.000Z
|
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Organism, Trigger, TriggerSet, TriggerAlert, TriggerAlertStatus, \
TriggerSubscription
class OrganismSerializer(serializers.ModelSerializer):
class Meta:
model = Organism
fields = '__all__'
class TriggerSerializer(serializers.ModelSerializer):
triggerset_id = serializers.PrimaryKeyRelatedField(
queryset=TriggerSet.objects.all(), source='triggerset', write_only=True)
triggerset = serializers.CharField(read_only=True)
class Meta:
model = Trigger
fields = '__all__'
class TriggerSubscriptionSerializer(serializers.ModelSerializer):
triggerset_id = serializers.PrimaryKeyRelatedField(
queryset=TriggerSet.objects.all(), source='triggerset', write_only=True)
# triggerset = TriggerSetSerializer(read_only=True)
triggerset = serializers.CharField(read_only=True)
user = serializers.SlugRelatedField(slug_field='username', queryset=User.objects.all())
class Meta:
model = TriggerSubscription
fields = '__all__'
class TriggerSetSerializer(serializers.ModelSerializer):
triggers = TriggerSerializer(many=True, read_only=True)
subscriptions = TriggerSubscriptionSerializer(many=True, read_only=True)
class Meta:
model = TriggerSet
fields = '__all__'
class TriggerAlertSerializer(serializers.ModelSerializer):
triggerset = TriggerSetSerializer(read_only=True)
# triggerset = serializers.CharField(read_only=True)
class Meta:
model = TriggerAlert
fields = '__all__'
class TriggerAlertStatusSerializer(serializers.ModelSerializer):
triggeralert = TriggerAlertSerializer(read_only=True)
user = serializers.SlugRelatedField(
queryset=User.objects.all(),
slug_field='username')
last_updated_by = serializers.SlugRelatedField(
queryset=User.objects.all(),
slug_field='username')
class Meta:
model = TriggerAlertStatus
fields = '__all__'
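For context, a typical Django REST Framework round-trip with the serializers above might look like the sketch below. It assumes the project's Django settings are loaded (it is not runnable standalone), and the field values passed to OrganismSerializer are placeholders, not a claim about the real Organism model's fields.

# Sketch only: standard DRF validate/save/serialize flow using the classes above.
# Requires the LIMS Django environment; field names below are placeholders.
incoming = OrganismSerializer(data={"name": "Escherichia coli"})
if incoming.is_valid():
    organism = incoming.save()                 # creates an Organism row
    print(OrganismSerializer(organism).data)   # primitive representation
else:
    print(incoming.errors)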
| 30.895522
| 91
| 0.734783
|
c64209d45a9768b8509899a710b7545077267823
| 12,241
|
py
|
Python
|
graph4nlp/pytorch/test/kg_completion/main.py
|
IBM/graph4nlp
|
a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
[
"Apache-2.0"
] | 18
|
2020-09-09T03:33:29.000Z
|
2021-07-22T11:17:16.000Z
|
graph4nlp/pytorch/test/kg_completion/main.py
|
IBM/graph4nlp
|
a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
[
"Apache-2.0"
] | null | null | null |
graph4nlp/pytorch/test/kg_completion/main.py
|
IBM/graph4nlp
|
a9bf20b23fa1ec368d9bd40cc8c557f86a9f8297
|
[
"Apache-2.0"
] | 1
|
2021-02-19T19:46:03.000Z
|
2021-02-19T19:46:03.000Z
|
import json
import torch
import pickle
import numpy as np
import argparse
import sys
import os
import math
from os.path import join
import torch.backends.cudnn as cudnn
from evaluation import ranking_and_hits
from models import SACN, ConvTransE, ConvE, DistMult, Complex, TransE
from models_graph4nlp import DistMultGNN, TransEGNN
from src.spodernet.spodernet.preprocessing.pipeline import Pipeline, DatasetStreamer
from src.spodernet.spodernet.preprocessing.processors import JsonLoaderProcessors, Tokenizer, AddToVocab, SaveLengthsToState, StreamToHDF5, SaveMaxLengthsToState, CustomTokenizer
from src.spodernet.spodernet.preprocessing.processors import ConvertTokenToIdx, ApplyFunction, ToLower, DictKey2ListMapper, ApplyFunction, StreamToBatch
from src.spodernet.spodernet.utils.global_config import Config, Backends
from src.spodernet.spodernet.utils.logger import Logger, LogLevel
from src.spodernet.spodernet.preprocessing.batching import StreamBatcher
from src.spodernet.spodernet.preprocessing.pipeline import Pipeline
from src.spodernet.spodernet.preprocessing.processors import TargetIdx2MultiTarget
from src.spodernet.spodernet.hooks import LossHook, ETAHook
from src.spodernet.spodernet.utils.util import Timer
from src.spodernet.spodernet.utils.cuda_utils import CUDATimer
import scipy.sparse as sp
import scipy
from os.path import join, exists
import pickle as pkl
path_dir = os.getcwd()
np.set_printoptions(precision=3)
# timer = CUDATimer()
cudnn.benchmark = True
# parse console parameters and set global variables
Config.backend = Backends.TORCH
Config.parse_argv(sys.argv)
# Config.cuda = True
#Config.embedding_dim = 200
model_name = '{2}_{0}_{1}'.format(Config.input_dropout, Config.dropout, Config.model_name)
epochs = 1000
load = False
if Config.dataset is None:
Config.dataset = 'FB15k-237'
model_path = 'saved_models/{0}_{1}.model'.format(Config.dataset, model_name)
''' Preprocess knowledge graph using spodernet. '''
def preprocess(dataset_name, delete_data=False):
full_path = 'data/{0}/e1rel_to_e2_full.json'.format(dataset_name)
train_path = 'data/{0}/e1rel_to_e2_train.json'.format(dataset_name)
dev_ranking_path = 'data/{0}/e1rel_to_e2_ranking_dev.json'.format(dataset_name)
test_ranking_path = 'data/{0}/e1rel_to_e2_ranking_test.json'.format(dataset_name)
keys2keys = {}
keys2keys['e1'] = 'e1' # entities
keys2keys['rel'] = 'rel' # relations
keys2keys['rel_eval'] = 'rel' # relations
keys2keys['e2'] = 'e1' # entities
keys2keys['e2_multi1'] = 'e1' # entity
keys2keys['e2_multi2'] = 'e1' # entity
input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
d = DatasetStreamer(input_keys)
d.add_stream_processor(JsonLoaderProcessors())
d.add_stream_processor(DictKey2ListMapper(input_keys))
# process full vocabulary and save it to disk
d.set_path(full_path)
p = Pipeline(Config.dataset, delete_data, keys=input_keys, skip_transformation=True)
p.add_sent_processor(ToLower())
p.add_sent_processor(CustomTokenizer(lambda x: x.split(' ')),keys=['e2_multi1', 'e2_multi2'])
p.add_token_processor(AddToVocab())
p.add_post_processor(ConvertTokenToIdx(keys2keys=keys2keys),
keys=['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2'])
p.add_post_processor(StreamToHDF5('full', samples_per_file=1000, keys=input_keys))
p.execute(d)
p.save_vocabs()
# process train, dev and test sets and save them to hdf5
p.skip_transformation = False
for path, name in zip([train_path, dev_ranking_path, test_ranking_path], ['train', 'dev_ranking', 'test_ranking']):
d.set_path(path)
p.clear_processors()
p.add_sent_processor(ToLower())
p.add_sent_processor(CustomTokenizer(lambda x: x.split(' ')),keys=['e2_multi1', 'e2_multi2'])
p.add_post_processor(ConvertTokenToIdx(keys2keys=keys2keys), keys=['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2'])
p.add_post_processor(StreamToHDF5(name, samples_per_file=1000, keys=input_keys))
p.execute(d)
def main():
#config_path = join(path_dir, 'data', Config.dataset, 'data.npy')
if Config.process: preprocess(Config.dataset, delete_data=True)
input_keys = ['e1', 'rel', 'rel_eval', 'e2', 'e2_multi1', 'e2_multi2']
p = Pipeline(Config.dataset, keys=input_keys)
p.load_vocabs()
vocab = p.state['vocab']
node_list = p.state['vocab']['e1']
rel_list = p.state['vocab']['rel']
num_entities = vocab['e1'].num_token
num_relations = vocab['rel'].num_token
train_batcher = StreamBatcher(Config.dataset, 'train', Config.batch_size, randomize=True, keys=input_keys)
dev_rank_batcher = StreamBatcher(Config.dataset, 'dev_ranking', Config.batch_size, randomize=False, loader_threads=4, keys=input_keys)
test_rank_batcher = StreamBatcher(Config.dataset, 'test_ranking', Config.batch_size, randomize=False, loader_threads=4, keys=input_keys)
train_batcher.at_batch_prepared_observers.insert(1,TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))
dev_rank_batcher.at_batch_prepared_observers.insert(1,TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))
dev_rank_batcher.at_batch_prepared_observers.insert(1,TargetIdx2MultiTarget(num_entities, 'e2_multi2', 'e2_multi2_binary'))
test_rank_batcher.at_batch_prepared_observers.insert(1,TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))
test_rank_batcher.at_batch_prepared_observers.insert(1,TargetIdx2MultiTarget(num_entities, 'e2_multi2', 'e2_multi2_binary'))
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
data = []
rows = []
columns = []
for i, str2var in enumerate(train_batcher):
print("batch number:", i)
for j in range(str2var['e1'].shape[0]):
for k in range(str2var['e2_multi1'][j].shape[0]):
if str2var['e2_multi1'][j][k] != 0:
a = str2var['rel'][j].cpu()
data.append(str2var['rel'][j].cpu())
rows.append(str2var['e1'][j].cpu().tolist()[0])
columns.append(str2var['e2_multi1'][j][k].cpu())
else:
break
rows = rows + [i for i in range(num_entities)]
columns = columns + [i for i in range(num_entities)]
data = data + [num_relations for i in range(num_entities)]
if Config.cuda:
indices = torch.LongTensor([rows, columns]).cuda()
v = torch.LongTensor(data).cuda()
adjacencies = [indices, v, num_entities]
else:
indices = torch.LongTensor([rows, columns])
v = torch.LongTensor(data)
adjacencies = [indices, v, num_entities]
#filename = join(path_dir, 'data', Config.dataset, 'adj.pkl')
#file = open(filename, 'wb+')
#pkl.dump(adjacencies, file)
#file.close()
print('Finished the preprocessing')
############
X = torch.LongTensor([i for i in range(num_entities)])
if Config.model_name is None:
model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)
elif Config.model_name == 'SACN':
model = SACN(vocab['e1'].num_token, vocab['rel'].num_token)
elif Config.model_name == 'ConvTransE':
model = ConvTransE(vocab['e1'].num_token, vocab['rel'].num_token)
elif Config.model_name == 'ConvE':
model = ConvE(vocab['e1'].num_token, vocab['rel'].num_token)
elif Config.model_name == 'DistMult':
model = DistMult(vocab['e1'].num_token, vocab['rel'].num_token)
elif Config.model_name == 'DistMultGNN':
model = DistMultGNN(vocab['e1'].num_token, vocab['rel'].num_token)
elif Config.model_name == 'TransE':
model = TransE(vocab['e1'].num_token, vocab['rel'].num_token)
elif Config.model_name == 'TransEGNN':
model = TransEGNN(vocab['e1'].num_token, vocab['rel'].num_token)
elif Config.model_name == 'ComplEx':
model = Complex(vocab['e1'].num_token, vocab['rel'].num_token)
else:
# log.info('Unknown model: {0}', Config.model_name)
raise Exception("Unknown model!")
#train_batcher.at_batch_prepared_observers.insert(1,TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))
train_batcher = StreamBatcher(Config.dataset, 'train', Config.batch_size, randomize=True, keys=input_keys)
eta = ETAHook('train', print_every_x_batches=100)
train_batcher.subscribe_to_events(eta)
train_batcher.subscribe_to_start_of_epoch_event(eta)
train_batcher.subscribe_to_events(LossHook('train', print_every_x_batches=100))
train_batcher.at_batch_prepared_observers.insert(1,TargetIdx2MultiTarget(num_entities, 'e2_multi1', 'e2_multi1_binary'))
if Config.cuda:
model.cuda()
X = X.cuda()
if load:
model_params = torch.load(model_path)
print(model)
total_param_size = []
params = [(key, value.size(), value.numel()) for key, value in model_params.items()]
for key, size, count in params:
total_param_size.append(count)
print(key, size, count)
print(np.array(total_param_size).sum())
model.load_state_dict(model_params)
model.eval()
ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation')
ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation')
else:
model.init()
total_param_size = []
params = [value.numel() for value in model.parameters()]
print(params)
print(np.sum(params))
opt = torch.optim.Adam(model.parameters(), lr=Config.learning_rate, weight_decay=Config.L2)
for epoch in range(epochs):
model.train()
for i, str2var in enumerate(train_batcher):
opt.zero_grad()
if Config.cuda:
e1 = str2var['e1'].cuda()
rel = str2var['rel'].cuda()
e2_multi = str2var['e2_multi1_binary'].float().cuda()
else:
e1 = str2var['e1']
rel = str2var['rel']
e2_multi = str2var['e2_multi1_binary'].float()
if model.loss_name == "SoftMarginLoss":
e2_multi[e2_multi==0] = -1
pred = model.forward(e1, rel, X, adjacencies)
# loss = model.loss(pred.view(-1,1).squeeze(), e2_multi.view(-1,1).squeeze())
loss = model.loss(pred, e2_multi)
elif model.loss_name == "SoftplusLoss" or model.loss_name == "SigmoidLoss":
pred, pos, neg = model.forward(e1, rel, X, adjacencies, e2_multi)
loss = model.loss(pos, neg)
elif model.loss_name == "BCELoss":
# label smoothing
e2_multi = ((1.0 - Config.label_smoothing_epsilon) * e2_multi) + (1.0 / e2_multi.size(1))
pred = model.forward(e1, rel, X, adjacencies)
loss = model.loss(pred, e2_multi)
else: # MSELoss
# label smoothing
e2_multi = ((1.0-Config.label_smoothing_epsilon)*e2_multi) + (1.0/e2_multi.size(1))
pred = model.forward(e1, rel, X, adjacencies)
loss = model.loss(pred.view(-1,1).squeeze(), e2_multi.view(-1,1).squeeze())
# loss = model.loss(pred, e2_multi)
loss.backward()
opt.step()
train_batcher.state.loss = loss.cpu()
print('saving to {0}'.format(model_path))
torch.save(model.state_dict(), model_path)
model.eval()
with torch.no_grad():
ranking_and_hits(model, dev_rank_batcher, vocab, 'dev_evaluation', X, adjacencies)
if epoch % 3 == 0:
if epoch > 0:
ranking_and_hits(model, test_rank_batcher, vocab, 'test_evaluation', X, adjacencies)
if __name__ == '__main__':
main()
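The label-smoothing line in the training loop above turns a {0,1} multi-hot target row into soft targets by scaling with (1 - epsilon) and adding 1/N, where N is the row length. A small standalone check of that arithmetic is sketched below; the epsilon of 0.1 and the 4-entry row are arbitrary illustration values, and with the real entity count N the added 1/N term is far smaller.

# Standalone check of the label-smoothing arithmetic used above.
import torch

eps = 0.1                                    # illustrative; the script reads it from Config
e2_multi = torch.tensor([[1., 0., 0., 1.]])  # toy multi-hot row, N = 4
smoothed = ((1.0 - eps) * e2_multi) + (1.0 / e2_multi.size(1))
print(smoothed)  # tensor([[1.1500, 0.2500, 0.2500, 1.1500]])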
| 42.950877
| 178
| 0.673474
|
bcd6e62be707fd6a308b29343786314467134eae
| 4,256
|
py
|
Python
|
games/ttt.py
|
Prie-st/Aoi
|
1fec6fe487d4d1dab0bb125ebaa91051e3e8f866
|
[
"MIT"
] | null | null | null |
games/ttt.py
|
Prie-st/Aoi
|
1fec6fe487d4d1dab0bb125ebaa91051e3e8f866
|
[
"MIT"
] | null | null | null |
games/ttt.py
|
Prie-st/Aoi
|
1fec6fe487d4d1dab0bb125ebaa91051e3e8f866
|
[
"MIT"
] | null | null | null |
import asyncio
import random
from typing import Tuple
import aoi
import discord
from libs.conversions import discord_number_emojis
from .base import Game
class TicTacToe(Game):
def __init__(self, ctx: aoi.AoiContext):
super().__init__(ctx)
async def play(self): # noqa C901
board = [[0] * 3 for _ in range(3)]
def _c(x: int) -> Tuple[int, int]:
return (x - 1) // 3, (x - 1) % 3
def _xo(num, neg=False):
return [":x:", None, ":o:"][num + 1] if not neg else \
[":regional_indicator_x:", None, ":regional_indicator_o:"][num + 1]
def _get_board():
s = "_ _\n"
for i in range(1, 10):
row, col = _c(i)
cur = board[row][col]
s += (_xo(cur) if cur else discord_number_emojis(i))
if col == 2:
s += "\n"
return s
def _status():
wins = [
[4, 5, 6],
[1, 2, 3],
[7, 8, 9],
[8, 5, 2],
[9, 6, 3],
[7, 5, 3],
[9, 5, 1],
[7, 4, 1]
]
for i in [-1, 1]:
for row in wins:
if all([board[_c(j)[0]][_c(j)[1]] == i for j in row]):
return i, row
for row in board:
for col in row:
if col == 0:
return 0, []
return 2, []
def _make_next():
# make winning move
for i in range(1, 10):
orig = board[_c(i)[0]][_c(i)[1]]
if orig != 0:
continue
board[_c(i)[0]][_c(i)[1]] = -1
if _status()[0] == -1:
board[_c(i)[0]][_c(i)[1]] = -1
return
board[_c(i)[0]][_c(i)[1]] = orig
# block player's winning move
for i in range(1, 10):
orig = board[_c(i)[0]][_c(i)[1]]
if orig != 0:
continue
board[_c(i)[0]][_c(i)[1]] = 1
if _status()[0] == 1:
board[_c(i)[0]][_c(i)[1]] = -1
return
board[_c(i)[0]][_c(i)[1]] = orig
# pick a random square
            sq = random.choice([i for i in range(1, 10) if board[_c(i)[0]][_c(i)[1]] == 0])
board[_c(sq)[0]][_c(sq)[1]] = -1
comp = (random.random() > 0.5)
msg = await self.ctx.embed(title="Type 1-9", description=_get_board())
while True:
if not comp:
await msg.edit(embed=discord.Embed(title="Your turn!",
description=_get_board(), colour=discord.Colour.blue()))
sq = await self.ctx.input(int, ch=lambda x: (0 < x < 10) and board[_c(x)[0]][_c(x)[1]] == 0,
del_response=True)
board[_c(sq)[0]][_c(sq)[1]] = 1
if _status()[0] != 0:
break
else:
await msg.edit(embed=discord.Embed(title="My turn!",
description=_get_board(), colour=discord.Colour.gold()))
async with self.ctx.typing():
await asyncio.sleep(1)
_make_next()
if _status()[0] != 0:
break
comp = not comp
winner, win = _status()
s = "_ _\n"
for i in range(1, 10):
row, col = _c(i)
cur = board[row][col]
s += (_xo(cur, neg=(i in win)) if cur else ":black_large_square:")
if col == 2:
s += "\n"
if winner == 1:
title = "You win!"
color = discord.Colour.green()
elif winner == -1:
title = "You Lose ):"
color = discord.Colour.red()
else:
title = "It's a tie"
color = discord.Colour.purple()
await msg.edit(embed=discord.Embed(title=title,
description=s, colour=color))
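Most of the board helpers above hinge on the 1-9 square numbering being mapped to (row, column) by _c. The snippet below copies that one-line mapping so the check runs without the bot framework and verifies it row by row.

# Standalone sanity check of the square-number to (row, col) mapping used above.
def _square_to_rc(x):
    return (x - 1) // 3, (x - 1) % 3

assert [_square_to_rc(i) for i in (1, 2, 3)] == [(0, 0), (0, 1), (0, 2)]  # top row
assert [_square_to_rc(i) for i in (4, 5, 6)] == [(1, 0), (1, 1), (1, 2)]  # middle row
assert [_square_to_rc(i) for i in (7, 8, 9)] == [(2, 0), (2, 1), (2, 2)]  # bottom row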
| 33.511811
| 108
| 0.400376
|
32298fcd57d5baf271310f72bab2e0820b13b179
| 3,390
|
py
|
Python
|
tests/python/unittest/test_gluon_data.py
|
Najah-lshanableh/Deep_learning
|
4b8235bdacd319843dda7b331f207808e4a90a93
|
[
"Apache-2.0"
] | null | null | null |
tests/python/unittest/test_gluon_data.py
|
Najah-lshanableh/Deep_learning
|
4b8235bdacd319843dda7b331f207808e4a90a93
|
[
"Apache-2.0"
] | null | null | null |
tests/python/unittest/test_gluon_data.py
|
Najah-lshanableh/Deep_learning
|
4b8235bdacd319843dda7b331f207808e4a90a93
|
[
"Apache-2.0"
] | 1
|
2018-11-30T21:34:24.000Z
|
2018-11-30T21:34:24.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tarfile
import mxnet as mx
import numpy as np
from mxnet import gluon
def test_array_dataset():
X = np.random.uniform(size=(10, 20))
Y = np.random.uniform(size=(10,))
dataset = gluon.data.ArrayDataset(X, Y)
loader = gluon.data.DataLoader(dataset, 2)
for i, (x, y) in enumerate(loader):
assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2])
assert mx.test_utils.almost_equal(y.asnumpy(), Y[i*2:(i+1)*2])
def prepare_record():
if not os.path.isdir("data/test_images"):
os.makedirs('data/test_images')
if not os.path.isdir("data/test_images/test_images"):
gluon.utils.download("http://data.mxnet.io/data/test_images.tar.gz", "data/test_images.tar.gz")
tarfile.open('data/test_images.tar.gz').extractall('data/test_images/')
if not os.path.exists('data/test.rec'):
imgs = os.listdir('data/test_images/test_images')
record = mx.recordio.MXIndexedRecordIO('data/test.idx', 'data/test.rec', 'w')
for i, img in enumerate(imgs):
str_img = open('data/test_images/test_images/'+img, 'rb').read()
s = mx.recordio.pack((0, i, i, 0), str_img)
record.write_idx(i, s)
return 'data/test.rec'
def test_recordimage_dataset():
recfile = prepare_record()
dataset = gluon.data.vision.ImageRecordDataset(recfile)
loader = gluon.data.DataLoader(dataset, 1)
for i, (x, y) in enumerate(loader):
assert x.shape[0] == 1 and x.shape[3] == 3
assert y.asscalar() == i
def test_sampler():
seq_sampler = gluon.data.SequentialSampler(10)
assert list(seq_sampler) == list(range(10))
rand_sampler = gluon.data.RandomSampler(10)
assert sorted(list(rand_sampler)) == list(range(10))
seq_batch_keep = gluon.data.BatchSampler(seq_sampler, 3, 'keep')
assert sum(list(seq_batch_keep), []) == list(range(10))
seq_batch_discard = gluon.data.BatchSampler(seq_sampler, 3, 'discard')
assert sum(list(seq_batch_discard), []) == list(range(9))
rand_batch_keep = gluon.data.BatchSampler(rand_sampler, 3, 'keep')
assert sorted(sum(list(rand_batch_keep), [])) == list(range(10))
def test_datasets():
assert len(gluon.data.vision.MNIST(root='data')) == 60000
assert len(gluon.data.vision.CIFAR10(root='data', train=False)) == 10000
def test_image_folder_dataset():
prepare_record()
dataset = gluon.data.vision.ImageFolderDataset('data/test_images')
assert dataset.synsets == ['test_images']
assert len(dataset.items) == 16
if __name__ == '__main__':
import nose
nose.runmodule()
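test_sampler() above effectively pins down what 'keep' and 'discard' mean for 10 samples in batches of 3. The pure-Python sketch below reproduces the same batching arithmetic without needing mxnet installed; batched() is an illustrative helper, not part of gluon.

# Pure-Python illustration of the 'keep' vs 'discard' behaviour asserted above.
def batched(indices, batch_size, last_batch):
    batches = [indices[i:i + batch_size] for i in range(0, len(indices), batch_size)]
    if last_batch == 'discard' and batches and len(batches[-1]) < batch_size:
        batches = batches[:-1]
    return batches

idx = list(range(10))
assert sum(batched(idx, 3, 'keep'), []) == list(range(10))    # all 10 samples kept
assert sum(batched(idx, 3, 'discard'), []) == list(range(9))  # trailing partial batch dropped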
| 39.418605
| 103
| 0.69469
|
dd7443f07e3506a1b8085024ac2ac98edbbb8ff3
| 18,103
|
py
|
Python
|
src/_pytest/pathlib.py
|
Vlad-Radz/pytest
|
1a4e8991bdac9855c9ed1a025989579283d03065
|
[
"MIT"
] | null | null | null |
src/_pytest/pathlib.py
|
Vlad-Radz/pytest
|
1a4e8991bdac9855c9ed1a025989579283d03065
|
[
"MIT"
] | null | null | null |
src/_pytest/pathlib.py
|
Vlad-Radz/pytest
|
1a4e8991bdac9855c9ed1a025989579283d03065
|
[
"MIT"
] | 1
|
2020-12-10T06:21:35.000Z
|
2020-12-10T06:21:35.000Z
|
import atexit
import contextlib
import fnmatch
import importlib.util
import itertools
import os
import shutil
import sys
import uuid
import warnings
from enum import Enum
from functools import partial
from os.path import expanduser
from os.path import expandvars
from os.path import isabs
from os.path import sep
from posixpath import sep as posix_sep
from types import ModuleType
from typing import Iterable
from typing import Iterator
from typing import Optional
from typing import Set
from typing import TypeVar
from typing import Union
import py
from _pytest.compat import assert_never
from _pytest.outcomes import skip
from _pytest.warning_types import PytestWarning
if sys.version_info[:2] >= (3, 6):
from pathlib import Path, PurePath
else:
from pathlib2 import Path, PurePath
__all__ = ["Path", "PurePath"]
LOCK_TIMEOUT = 60 * 60 * 3
_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath)
def get_lock_path(path: _AnyPurePath) -> _AnyPurePath:
return path.joinpath(".lock")
def ensure_reset_dir(path: Path) -> None:
"""
ensures the given path is an empty directory
"""
if path.exists():
rm_rf(path)
path.mkdir()
def on_rm_rf_error(func, path: str, exc, *, start_path: Path) -> bool:
"""Handles known read-only errors during rmtree.
The returned value is used only by our own tests.
"""
exctype, excvalue = exc[:2]
# another process removed the file in the middle of the "rm_rf" (xdist for example)
# more context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018
if isinstance(excvalue, FileNotFoundError):
return False
if not isinstance(excvalue, PermissionError):
warnings.warn(
PytestWarning(
"(rm_rf) error removing {}\n{}: {}".format(path, exctype, excvalue)
)
)
return False
if func not in (os.rmdir, os.remove, os.unlink):
if func not in (os.open,):
warnings.warn(
PytestWarning(
"(rm_rf) unknown function {} when removing {}:\n{}: {}".format(
func, path, exctype, excvalue
)
)
)
return False
# Chmod + retry.
import stat
def chmod_rw(p: str) -> None:
mode = os.stat(p).st_mode
os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR)
# For files, we need to recursively go upwards in the directories to
# ensure they all are also writable.
p = Path(path)
if p.is_file():
for parent in p.parents:
chmod_rw(str(parent))
# stop when we reach the original path passed to rm_rf
if parent == start_path:
break
chmod_rw(str(path))
func(path)
return True
def ensure_extended_length_path(path: Path) -> Path:
"""Get the extended-length version of a path (Windows).
On Windows, by default, the maximum length of a path (MAX_PATH) is 260
characters, and operations on paths longer than that fail. But it is possible
to overcome this by converting the path to "extended-length" form before
performing the operation:
https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation
On Windows, this function returns the extended-length absolute version of path.
On other platforms it returns path unchanged.
"""
if sys.platform.startswith("win32"):
path = path.resolve()
path = Path(get_extended_length_path_str(str(path)))
return path
def get_extended_length_path_str(path: str) -> str:
"""Converts to extended length path as a str"""
long_path_prefix = "\\\\?\\"
unc_long_path_prefix = "\\\\?\\UNC\\"
if path.startswith((long_path_prefix, unc_long_path_prefix)):
return path
# UNC
if path.startswith("\\\\"):
return unc_long_path_prefix + path[2:]
return long_path_prefix + path
def rm_rf(path: Path) -> None:
"""Remove the path contents recursively, even if some elements
are read-only.
"""
path = ensure_extended_length_path(path)
onerror = partial(on_rm_rf_error, start_path=path)
shutil.rmtree(str(path), onerror=onerror)
def find_prefixed(root: Path, prefix: str) -> Iterator[Path]:
"""finds all elements in root that begin with the prefix, case insensitive"""
l_prefix = prefix.lower()
for x in root.iterdir():
if x.name.lower().startswith(l_prefix):
yield x
def extract_suffixes(iter: Iterable[PurePath], prefix: str) -> Iterator[str]:
"""
:param iter: iterator over path names
:param prefix: expected prefix of the path names
:returns: the parts of the paths following the prefix
"""
p_len = len(prefix)
for p in iter:
yield p.name[p_len:]
def find_suffixes(root: Path, prefix: str) -> Iterator[str]:
"""combines find_prefixes and extract_suffixes
"""
return extract_suffixes(find_prefixed(root, prefix), prefix)
def parse_num(maybe_num) -> int:
"""parses number path suffixes, returns -1 on error"""
try:
return int(maybe_num)
except ValueError:
return -1
def _force_symlink(
root: Path, target: Union[str, PurePath], link_to: Union[str, Path]
) -> None:
"""helper to create the current symlink
it's full of race conditions that are reasonably ok to ignore
for the context of best effort linking to the latest test run
the presumption being that in case of much parallelism
the inaccuracy is going to be acceptable
"""
current_symlink = root.joinpath(target)
try:
current_symlink.unlink()
except OSError:
pass
try:
current_symlink.symlink_to(link_to)
except Exception:
pass
def make_numbered_dir(root: Path, prefix: str) -> Path:
"""create a directory with an increased number as suffix for the given prefix"""
for i in range(10):
# try up to 10 times to create the folder
max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
new_number = max_existing + 1
new_path = root.joinpath("{}{}".format(prefix, new_number))
try:
new_path.mkdir()
except Exception:
pass
else:
_force_symlink(root, prefix + "current", new_path)
return new_path
else:
raise OSError(
"could not create numbered dir with prefix "
"{prefix} in {root} after 10 tries".format(prefix=prefix, root=root)
)
def create_cleanup_lock(p: Path) -> Path:
"""crates a lock to prevent premature folder cleanup"""
lock_path = get_lock_path(p)
try:
fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
except FileExistsError as e:
raise OSError("cannot create lockfile in {path}".format(path=p)) from e
else:
pid = os.getpid()
spid = str(pid).encode()
os.write(fd, spid)
os.close(fd)
if not lock_path.is_file():
raise OSError("lock path got renamed after successful creation")
return lock_path
def register_cleanup_lock_removal(lock_path: Path, register=atexit.register):
"""registers a cleanup function for removing a lock, by default on atexit"""
pid = os.getpid()
def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None:
current_pid = os.getpid()
if current_pid != original_pid:
# fork
return
try:
lock_path.unlink()
except OSError:
pass
return register(cleanup_on_exit)
def maybe_delete_a_numbered_dir(path: Path) -> None:
"""removes a numbered directory if its lock can be obtained and it does not seem to be in use"""
path = ensure_extended_length_path(path)
lock_path = None
try:
lock_path = create_cleanup_lock(path)
parent = path.parent
garbage = parent.joinpath("garbage-{}".format(uuid.uuid4()))
path.rename(garbage)
rm_rf(garbage)
except OSError:
# known races:
# * other process did a cleanup at the same time
# * deletable folder was found
# * process cwd (Windows)
return
finally:
# if we created the lock, ensure we remove it even if we failed
# to properly remove the numbered dir
if lock_path is not None:
try:
lock_path.unlink()
except OSError:
pass
def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool:
"""checks if a lock exists and breaks it if its considered dead"""
if path.is_symlink():
return False
lock = get_lock_path(path)
if not lock.exists():
return True
try:
lock_time = lock.stat().st_mtime
except Exception:
return False
else:
if lock_time < consider_lock_dead_if_created_before:
            # we want to ignore any errors while trying to remove the lock such as:
# - PermissionDenied, like the file permissions have changed since the lock creation
# - FileNotFoundError, in case another pytest process got here first.
# and any other cause of failure.
with contextlib.suppress(OSError):
lock.unlink()
return True
return False
def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None:
"""tries to cleanup a folder if we can ensure it's deletable"""
if ensure_deletable(path, consider_lock_dead_if_created_before):
maybe_delete_a_numbered_dir(path)
def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]:
"""lists candidates for numbered directories to be removed - follows py.path"""
max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
max_delete = max_existing - keep
paths = find_prefixed(root, prefix)
paths, paths2 = itertools.tee(paths)
numbers = map(parse_num, extract_suffixes(paths2, prefix))
for path, number in zip(paths, numbers):
if number <= max_delete:
yield path
def cleanup_numbered_dir(
root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float
) -> None:
"""cleanup for lock driven numbered directories"""
for path in cleanup_candidates(root, prefix, keep):
try_cleanup(path, consider_lock_dead_if_created_before)
for path in root.glob("garbage-*"):
try_cleanup(path, consider_lock_dead_if_created_before)
def make_numbered_dir_with_cleanup(
root: Path, prefix: str, keep: int, lock_timeout: float
) -> Path:
"""creates a numbered dir with a cleanup lock and removes old ones"""
e = None
for i in range(10):
try:
p = make_numbered_dir(root, prefix)
lock_path = create_cleanup_lock(p)
register_cleanup_lock_removal(lock_path)
except Exception as exc:
e = exc
else:
consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout
# Register a cleanup for program exit
atexit.register(
cleanup_numbered_dir,
root,
prefix,
keep,
consider_lock_dead_if_created_before,
)
return p
assert e is not None
raise e
def resolve_from_str(input: str, root):
assert not isinstance(input, Path), "would break on py2"
root = Path(root)
input = expanduser(input)
input = expandvars(input)
if isabs(input):
return Path(input)
else:
return root.joinpath(input)
def fnmatch_ex(pattern: str, path) -> bool:
"""FNMatcher port from py.path.common which works with PurePath() instances.
The difference between this algorithm and PurePath.match() is that the latter matches "**" glob expressions
for each part of the path, while this algorithm uses the whole path instead.
For example:
"tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" with this algorithm, but not with
PurePath.match().
    This algorithm was ported to keep backward-compatibility with existing settings which assume paths match
    according to this logic.
References:
* https://bugs.python.org/issue29249
* https://bugs.python.org/issue34731
"""
path = PurePath(path)
iswin32 = sys.platform.startswith("win")
if iswin32 and sep not in pattern and posix_sep in pattern:
# Running on Windows, the pattern has no Windows path separators,
# and the pattern has one or more Posix path separators. Replace
# the Posix path separators with the Windows path separator.
pattern = pattern.replace(posix_sep, sep)
if sep not in pattern:
name = path.name
else:
name = str(path)
if path.is_absolute() and not os.path.isabs(pattern):
pattern = "*{}{}".format(os.sep, pattern)
return fnmatch.fnmatch(name, pattern)
def parts(s: str) -> Set[str]:
parts = s.split(sep)
return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))}
def symlink_or_skip(src, dst, **kwargs):
"""Makes a symlink or skips the test in case symlinks are not supported."""
try:
os.symlink(str(src), str(dst), **kwargs)
except OSError as e:
skip("symlinks not supported: {}".format(e))
class ImportMode(Enum):
"""Possible values for `mode` parameter of `import_path`"""
prepend = "prepend"
append = "append"
importlib = "importlib"
class ImportPathMismatchError(ImportError):
"""Raised on import_path() if there is a mismatch of __file__'s.
This can happen when `import_path` is called multiple times with different filenames that has
the same basename but reside in packages
(for example "/tests1/test_foo.py" and "/tests2/test_foo.py").
"""
def import_path(
p: Union[str, py.path.local, Path],
*,
mode: Union[str, ImportMode] = ImportMode.prepend
) -> ModuleType:
"""
Imports and returns a module from the given path, which can be a file (a module) or
a directory (a package).
The import mechanism used is controlled by the `mode` parameter:
* `mode == ImportMode.prepend`: the directory containing the module (or package, taking
`__init__.py` files into account) will be put at the *start* of `sys.path` before
being imported with `__import__.
* `mode == ImportMode.append`: same as `prepend`, but the directory will be appended
to the end of `sys.path`, if not already in `sys.path`.
* `mode == ImportMode.importlib`: uses more fine control mechanisms provided by `importlib`
to import the module, which avoids having to use `__import__` and muck with `sys.path`
at all. It effectively allows having same-named test modules in different places.
:raise ImportPathMismatchError: if after importing the given `path` and the module `__file__`
are different. Only raised in `prepend` and `append` modes.
"""
mode = ImportMode(mode)
path = Path(str(p))
if not path.exists():
raise ImportError(path)
if mode is ImportMode.importlib:
module_name = path.stem
for meta_importer in sys.meta_path:
spec = meta_importer.find_spec(module_name, [str(path.parent)])
if spec is not None:
break
else:
spec = importlib.util.spec_from_file_location(module_name, str(path))
if spec is None:
raise ImportError(
"Can't find module {} at location {}".format(module_name, str(path))
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod) # type: ignore[union-attr]
return mod
pkg_path = resolve_package_path(path)
if pkg_path is not None:
pkg_root = pkg_path.parent
names = list(path.with_suffix("").relative_to(pkg_root).parts)
if names[-1] == "__init__":
names.pop()
module_name = ".".join(names)
else:
pkg_root = path.parent
module_name = path.stem
# change sys.path permanently: restoring it at the end of this function would cause surprising
# problems because of delayed imports: for example, a conftest.py file imported by this function
# might have local imports, which would fail at runtime if we restored sys.path.
if mode is ImportMode.append:
if str(pkg_root) not in sys.path:
sys.path.append(str(pkg_root))
elif mode is ImportMode.prepend:
if str(pkg_root) != sys.path[0]:
sys.path.insert(0, str(pkg_root))
else:
assert_never(mode)
importlib.import_module(module_name)
mod = sys.modules[module_name]
if path.name == "__init__.py":
return mod
ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "")
if ignore != "1":
module_file = mod.__file__
if module_file.endswith((".pyc", ".pyo")):
module_file = module_file[:-1]
if module_file.endswith(os.path.sep + "__init__.py"):
module_file = module_file[: -(len(os.path.sep + "__init__.py"))]
try:
is_same = os.path.samefile(str(path), module_file)
except FileNotFoundError:
is_same = False
if not is_same:
raise ImportPathMismatchError(module_name, module_file, path)
return mod
def resolve_package_path(path: Path) -> Optional[Path]:
"""Return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
Return None if it can not be determined.
"""
result = None
for parent in itertools.chain((path,), path.parents):
if parent.is_dir():
if not parent.joinpath("__init__.py").is_file():
break
if not parent.name.isidentifier():
break
result = parent
return result
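The fnmatch_ex() docstring above makes a concrete claim about '**' matching against whole paths, and parts() builds the set of leading sub-paths. The snippet below checks both; it assumes the module is importable as _pytest.pathlib (as in a pytest checkout) and a POSIX path separator.

# Quick check of the documented fnmatch_ex behaviour and of parts().
from _pytest.pathlib import fnmatch_ex, parts

# Whole-path matching, exactly as claimed in the fnmatch_ex docstring.
assert fnmatch_ex("tests/**/doc/test*.py", "tests/foo/bar/doc/test_foo.py")

# parts() yields every leading sub-path (POSIX separator assumed here).
assert parts("tests/foo/bar") == {"tests", "tests/foo", "tests/foo/bar"}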
| 32.676895
| 118
| 0.649064
|
0c858904780399b5116c4f28f2c60aa305f361c1
| 2,402
|
py
|
Python
|
cartomancy/brains/environments/go_fish.py
|
joedaws/card-player
|
6e44bcc7c3e416fbd002c1d0216cf75e213a74c1
|
[
"MIT"
] | null | null | null |
cartomancy/brains/environments/go_fish.py
|
joedaws/card-player
|
6e44bcc7c3e416fbd002c1d0216cf75e213a74c1
|
[
"MIT"
] | null | null | null |
cartomancy/brains/environments/go_fish.py
|
joedaws/card-player
|
6e44bcc7c3e416fbd002c1d0216cf75e213a74c1
|
[
"MIT"
] | null | null | null |
import gym
from gym import spaces
from gym.utils import seeding
# step()/reset() below were adapted from gym's Blackjack environment and use its
# card helpers; import them explicitly (assumed location, adjust if these helpers
# are defined elsewhere in this project).
from gym.envs.toy_text.blackjack import (cmp, draw_card, draw_hand, sum_hand,
                                         is_bust, is_natural, score, usable_ace)
class GoFishEnv(gym.Env):
"""Go Fish environments implementation.
Action Space:
Each players may ask for one of the ranks from one of the players.
Therefore, the action spaces is a tuple with the number of players
and a discrete spaces of 13 (the total number of ranks).
Observation Space:
Each players
Args:
num_oppoents (int): number of OTHER players that will play the game.
"""
NUM_RANKS = 13 # total number of ranks that a card may have.
    def __init__(self, num_opponents, natural=False):
self.action_space = spaces.Tuple((
spaces.Discrete(num_opponents),
spaces.Discrete(self.NUM_RANKS)
))
        # Placeholder observation space carried over from the Blackjack example;
        # a Go Fish-specific observation space still needs to be defined.
        self.observation_space = spaces.Tuple((
            spaces.Discrete(32),
            spaces.Discrete(11),
            spaces.Discrete(2)))
self.seed()
# Flag to payout 1.5 on a "natural" blackjack win, like casino rules
# Ref: http://www.bicyclecards.com/how-to-play/blackjack/
self.natural = natural
# Start the first game
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action)
if action: # hit: add a card to players hand and return
self.player.append(draw_card(self.np_random))
if is_bust(self.player):
done = True
reward = -1.
else:
done = False
reward = 0.
else: # stick: play out the dealers hand, and score
done = True
while sum_hand(self.dealer) < 17:
self.dealer.append(draw_card(self.np_random))
reward = cmp(score(self.player), score(self.dealer))
if self.natural and is_natural(self.player) and reward == 1.:
reward = 1.5
return self._get_obs(), reward, done, {}
def _get_obs(self):
return (sum_hand(self.player), self.dealer[0], usable_ace(self.player))
def reset(self):
self.dealer = draw_hand(self.np_random)
self.player = draw_hand(self.np_random)
return self._get_obs()
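A minimal, hypothetical smoke test of the action space described in the class docstring: with two opponents an action is an (opponent_index, rank_index) pair. It only exercises construction and sampling, since the step() logic above is still carried over from the Blackjack example.

# Hypothetical smoke test for the action space only.
env = GoFishEnv(num_opponents=2)
opponent, rank = env.action_space.sample()
assert 0 <= opponent < 2 and 0 <= rank < GoFishEnv.NUM_RANKS
print("ask player", opponent, "for rank", rank)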
| 32.90411
| 79
| 0.59159
|
036b8f7924798437fa8140889df31bfe8ee74225
| 497
|
py
|
Python
|
purses/model.py
|
pgdr/purses
|
1e6073c3639d73f1405149c39448d39d4d29432f
|
[
"Apache-2.0"
] | null | null | null |
purses/model.py
|
pgdr/purses
|
1e6073c3639d73f1405149c39448d39d4d29432f
|
[
"Apache-2.0"
] | 4
|
2018-06-21T09:52:42.000Z
|
2018-06-23T08:00:18.000Z
|
purses/model.py
|
pgdr/purses
|
1e6073c3639d73f1405149c39448d39d4d29432f
|
[
"Apache-2.0"
] | 1
|
2018-06-20T19:14:49.000Z
|
2018-06-20T19:14:49.000Z
|
class Model(object):
def __init__(self, df, fname="Purses"):
self.df = df
self.fname = fname
self.show_index = True
@property
def columns(self):
if self.show_index:
return [""] + list(self.df.columns)
return list(self.df.columns)
def row(self, r):
if self.show_index:
return [self.df.index[r]] + list(self.df.iloc[r])
return list(self.df.iloc[r])
def __len__(self):
return len(self.df)
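A small illustrative run of the Model wrapper above with a throw-away DataFrame, showing how show_index changes what columns and row() return. The DataFrame contents and the "demo.csv" name are made up for the example.

# Illustrative only: exercise Model with a tiny DataFrame.
import pandas as pd

df = pd.DataFrame({"amount": [10, 20], "category": ["food", "rent"]})
m = Model(df, fname="demo.csv")
print(m.columns)   # ['', 'amount', 'category']  (index column prepended)
print(m.row(0))    # [0, 10, 'food']
m.show_index = False
print(m.columns)   # ['amount', 'category']
print(len(m))      # 2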
| 24.85
| 61
| 0.559356
|
33a87e381b38fc9d861fb1c6711b93a8673bd98a
| 23,429
|
py
|
Python
|
polo/sql.py
|
lesburg/polo
|
d746a353fe14f56da1697e57d90351f0aec03ee2
|
[
"MIT"
] | 1
|
2020-05-09T01:42:32.000Z
|
2020-05-09T01:42:32.000Z
|
polo/sql.py
|
lesburg/polo
|
d746a353fe14f56da1697e57d90351f0aec03ee2
|
[
"MIT"
] | null | null | null |
polo/sql.py
|
lesburg/polo
|
d746a353fe14f56da1697e57d90351f0aec03ee2
|
[
"MIT"
] | 4
|
2020-05-11T19:45:25.000Z
|
2022-03-18T09:51:09.000Z
|
"""
sql.py
==================
This file contains most of the hand-crafted SQL queries used by various methods in the API.
"""
sql_tree_user_node = """
SELECT id
FROM TreeNode
WHERE name = ?
AND ParentID = 3
"""
sql_tree = """
SELECT t.ParentID as parent_id,
t.id,
p.id as plate_id,
p.Barcode as barcode,
t.Type as node_type,
CASE
WHEN p.id IS NULL THEN t.name
ELSE CONCAT(p.PlateNumber, ',', p.id, ',', p.Barcode)
END as name,
CASE
WHEN (SELECT COUNT(id) FROM TreeNode tn WHERE tn.ParentID = t.id) = 0
THEN CAST(0 AS BIT)
ELSE CAST(1 AS BIT)
END as has_children
FROM dbo.TreeNode t
FULL OUTER JOIN Plate p on p.TreeNodeID = t.id
WHERE ParentID = ?
AND name NOT LIKE '[_]%'
AND name NOT IN ('Formulatrix', 'Instant Imaging', 'Quality Test', 'Timing Test')
AND Type IN ('Project', 'ProjectsFolder', 'ProjectFolder', 'Experiment', 'ExperimentPlate')
ORDER BY name
"""
sql_all = """
SELECT ranked.id,
plate_id,
barcode,
well_num,
drop_num,
crystal,
other,
DATE_FORMAT(date_imaged, "%%m/%%d/%%Y") date_imaged,
CONCAT(s.url_prefix, relative_path) url
FROM (SELECT id,
plate_id,
barcode,
well_num,
drop_num,
crystal,
other,
date_imaged,
source_id,
relative_path,
@pwd_rank := IF(@current_plate = plate_id AND @current_well = well_num AND @current_drop = drop_num, @pwd_rank + 1, 1) AS pwd_rank,
@current_plate := plate_id,
@current_well := well_num,
@current_drop := drop_num
FROM image_scores
WHERE source_id = %s
AND scored_by = %s
AND plate_id IN ({query_placeholders})
{temperature_filter}
{drop_filter}
{score_filter}
AND crystal IS NOT NULL
AND crystal > {min_score}
ORDER BY plate_id, well_num, drop_num, {rank_method} DESC
) ranked, sources s
WHERE pwd_rank <= 1
AND source_id = s.id
"""
sql_set_sort_order = "SET @sort_dir = %s"
sql_set_pwd_rank = "SET @pwd_rank = 0"
sql_paginated_without_timecourse = """
SELECT CONCAT(CAST(plate_num AS CHAR), ',', CAST(plate_id as CHAR), ',', barcode) as plate_name,
ranked.id,
plate_id,
well_num,
CONCAT(row_letter, CAST(column_num AS CHAR)) as well_name,
drop_num,
crystal,
other,
date_imaged date_imaged_orig,
DATE_FORMAT(date_imaged, "%%m/%%d/%%Y") date_imaged,
CONCAT(s.url_prefix, relative_path) url,
temperature,
(
SELECT CAST(GROUP_CONCAT(manual_call ORDER BY disputed_at ASC) AS CHAR)
FROM image_scores i,
disputes d
WHERE i.id = d.image_score_id
AND ranked.id = d.image_score_id
AND ranked.source_id = i.source_id
) disputes
FROM (SELECT id,
plate_id,
barcode,
plate_num,
well_num,
row_letter,
column_num,
drop_num,
crystal,
other,
date_imaged,
source_id,
relative_path,
temperature,
@pwd_rank := IF(@current_plate = plate_id AND @current_well = well_num AND @current_drop = drop_num, @pwd_rank + 1, 1) AS pwd_rank,
@current_plate := plate_id,
@current_well := well_num,
@current_drop := drop_num
FROM image_scores
WHERE source_id = %s
AND scored_by = %s
AND plate_id IN ({query_placeholders})
{temperature_filter}
{drop_filter}
{score_filter}
AND crystal IS NOT NULL
AND crystal > {min_score}
ORDER BY plate_id, well_num, drop_num, {rank_method} desc
) ranked,
sources s
WHERE pwd_rank <= 1
AND source_id = s.id
ORDER BY
CASE WHEN @sort_dir = 'plate_id asc' THEN plate_id END ASC,
CASE WHEN @sort_dir = 'plate_id desc' THEN plate_id END DESC,
CASE WHEN @sort_dir = 'well_num asc' THEN well_num END ASC,
CASE WHEN @sort_dir = 'well_num desc' THEN well_num END DESC,
CASE WHEN @sort_dir = 'drop_num asc' THEN drop_num END ASC,
CASE WHEN @sort_dir = 'drop_num desc' THEN drop_num END DESC,
CASE WHEN @sort_dir = 'barcode asc' THEN barcode END ASC,
CASE WHEN @sort_dir = 'barcode desc' THEN barcode END DESC,
CASE WHEN @sort_dir = 'crystal asc' THEN crystal END ASC,
CASE WHEN @sort_dir = 'crystal desc' THEN crystal END DESC,
CASE WHEN @sort_dir = 'other asc' THEN other END ASC,
CASE WHEN @sort_dir = 'other desc' THEN other END DESC,
CASE WHEN @sort_dir = 'date_imaged asc' THEN date_imaged_orig END ASC,
CASE WHEN @sort_dir = 'date_imaged desc' THEN date_imaged_orig END DESC,
CASE WHEN @sort_dir = 'temperature asc' THEN temperature END ASC,
CASE WHEN @sort_dir = 'temperature desc' THEN temperature END DESC,
crystal DESC
LIMIT %s
OFFSET %s
"""
sql_paginated_with_timecourse = """
SELECT CONCAT(CAST(plate_num AS CHAR), ',', CAST(plate_id as CHAR), ',', barcode) as plate_name,
ranked.id,
plate_id,
well_num,
CONCAT(row_letter, CAST(column_num AS CHAR)) as well_name,
drop_num,
crystal,
other,
date_imaged date_imaged_orig,
DATE_FORMAT(date_imaged, "%%m/%%d/%%Y") date_imaged,
CONCAT(s.url_prefix, relative_path) url,
temperature,
(
SELECT CAST(GROUP_CONCAT(manual_call ORDER BY disputed_at ASC) AS CHAR)
FROM image_scores i,
disputes d
WHERE i.id = d.image_score_id
AND ranked.id = d.image_score_id
AND ranked.source_id = i.source_id
) disputes,
(
SELECT CAST(GROUP_CONCAT(date_imaged ORDER BY date_imaged ASC) AS CHAR)
FROM image_scores i
WHERE ranked.source_id = i.source_id
AND ranked.plate_id = i.plate_id
AND ranked.well_num = i.well_num
AND ranked.drop_num = i.drop_num
) all_dates_imaged,
(
SELECT CAST(GROUP_CONCAT(crystal ORDER BY date_imaged ASC) AS CHAR)
FROM image_scores i
WHERE ranked.source_id = i.source_id
AND ranked.plate_id = i.plate_id
AND ranked.well_num = i.well_num
AND ranked.drop_num = i.drop_num
) all_crystal_scores,
(
SELECT CAST(GROUP_CONCAT(other ORDER BY date_imaged ASC) AS CHAR)
FROM image_scores i
WHERE ranked.source_id = i.source_id
AND ranked.plate_id = i.plate_id
AND ranked.well_num = i.well_num
AND ranked.drop_num = i.drop_num
) all_other_scores
FROM (SELECT id,
plate_id,
barcode,
plate_num,
well_num,
row_letter,
column_num,
drop_num,
crystal,
other,
date_imaged,
source_id,
relative_path,
temperature,
@pwd_rank := IF(@current_plate = plate_id AND @current_well = well_num AND @current_drop = drop_num, @pwd_rank + 1, 1) AS pwd_rank,
@current_plate := plate_id,
@current_well := well_num,
@current_drop := drop_num
FROM image_scores
WHERE source_id = %s
AND scored_by = %s
AND plate_id IN ({query_placeholders})
{temperature_filter}
{drop_filter}
{score_filter}
AND crystal IS NOT NULL
AND crystal > {min_score}
ORDER BY plate_id, well_num, drop_num, {rank_method} desc
) ranked,
sources s
WHERE pwd_rank <= 1
AND source_id = s.id
ORDER BY
CASE WHEN @sort_dir = 'plate_id asc' THEN plate_id END ASC,
CASE WHEN @sort_dir = 'plate_id desc' THEN plate_id END DESC,
CASE WHEN @sort_dir = 'well_num asc' THEN well_num END ASC,
CASE WHEN @sort_dir = 'well_num desc' THEN well_num END DESC,
CASE WHEN @sort_dir = 'drop_num asc' THEN drop_num END ASC,
CASE WHEN @sort_dir = 'drop_num desc' THEN drop_num END DESC,
CASE WHEN @sort_dir = 'barcode asc' THEN barcode END ASC,
CASE WHEN @sort_dir = 'barcode desc' THEN barcode END DESC,
CASE WHEN @sort_dir = 'crystal asc' THEN crystal END ASC,
CASE WHEN @sort_dir = 'crystal desc' THEN crystal END DESC,
CASE WHEN @sort_dir = 'other asc' THEN other END ASC,
CASE WHEN @sort_dir = 'other desc' THEN other END DESC,
CASE WHEN @sort_dir = 'date_imaged asc' THEN date_imaged_orig END ASC,
CASE WHEN @sort_dir = 'date_imaged desc' THEN date_imaged_orig END DESC,
CASE WHEN @sort_dir = 'temperature asc' THEN temperature END ASC,
CASE WHEN @sort_dir = 'temperature desc' THEN temperature END DESC,
crystal DESC
LIMIT %s
OFFSET %s
"""
# GET CONDITIONS ONE PLATE AT A TIME SINCE I CANNOT GET THE WHERE IN BIND PARAMETER TO WORK PROPERLY
sql_conditions = """
WITH a as (
SELECT Plate.ID AS plate_id,
Well.WellNumber AS well_num,
CONCAT(WellLayerIngredient.Concentration, ' ', IngredientStock.ConcentrationUnits, ' ',
Ingredient.ShortName,
(CASE
WHEN WellLayerIngredient.PH IS NOT NULL
THEN CONCAT(' pH ', WellLayerIngredient.PH) END)) AS condition
FROM Plate
JOIN Well ON Well.PlateID = Plate.ID
JOIN WellLayer on WellLayer.WellID = Well.ID
JOIN WellLayerIngredient on WellLayerIngredient.WellLayerID = WellLayer.ID
JOIN IngredientStock on WellLayerIngredient.IngredientStockID = IngredientStock.ID
JOIN Ingredient on IngredientStock.IngredientID = Ingredient.ID
WHERE Plate.ID = ?
)
SELECT plate_id,
well_num,
conditions = STUFF(
(
SELECT '; ' + condition
FROM a
WHERE a.plate_id = b.plate_id
AND a.well_num = b.well_num FOR XML PATH ('')
), 1, 2, '')
from a b
GROUP BY plate_id, well_num
"""
sql_conditions_protein = """
WITH a as (
SELECT Plate.ID AS plate_id,
Well.WellNumber AS well_num,
WellDrop.DropNumber AS drop_num,
TreeNode.Name AS protein,
ProteinFormulation.Notes AS protein_notes,
CONCAT(WellLayerIngredient.Concentration, ' ', IngredientStock.ConcentrationUnits, ' ',
Ingredient.ShortName,
(CASE
WHEN WellLayerIngredient.PH IS NOT NULL
THEN CONCAT(' pH ', WellLayerIngredient.PH) END)) AS condition
FROM Plate
JOIN Well ON Well.PlateID = Plate.ID
JOIN WellDrop ON Well.ID = WellDrop.WellID
JOIN ProteinFormulation ON WellDrop.ProteinFormulationID = ProteinFormulation.ID
JOIN TreeNode ON ProteinFormulation.TreeNodeID = TreeNode.ID
JOIN WellLayer on WellLayer.WellID = Well.ID
JOIN WellLayerIngredient on WellLayerIngredient.WellLayerID = WellLayer.ID
JOIN IngredientStock on WellLayerIngredient.IngredientStockID = IngredientStock.ID
JOIN Ingredient on IngredientStock.IngredientID = Ingredient.ID
WHERE Plate.ID = ?
)
SELECT plate_id,
well_num,
drop_num,
protein,
protein_notes,
conditions = STUFF(
(
SELECT '; ' + condition
FROM a
WHERE a.plate_id = b.plate_id
AND a.drop_num = b.drop_num
AND a.well_num = b.well_num FOR XML PATH ('')
), 1, 2, '')
from a b
GROUP BY plate_id, well_num, drop_num, protein, protein_notes
"""
sql_timecourse = """
SELECT scores.id,
plate_num,
plate_id,
barcode,
well_num,
drop_num,
crystal,
other,
date_imaged date_imaged_orig,
DATE_FORMAT(date_imaged, "%%m/%%d/%%Y") date_imaged,
CONCAT(s.url_prefix, relative_path) url
FROM image_scores scores, sources s
WHERE s.id = %s
AND scored_by = %s
AND plate_id = %s
AND well_num = %s
AND drop_num = %s
ORDER BY date_imaged_orig DESC
"""
sql_path_to_experiments_like_text = """
-- FIND PATH TO EXPERIMENTS MATCHING TEXT
WITH treecte(nodeid, Name, Type, ParentID, LEVEL, treepath)
AS (SELECT ID AS nodeid,
Name,
Type,
ParentID,
0 AS LEVEL,
CAST(Name AS VARCHAR(1024)) AS treepath
FROM TreeNode
WHERE ParentID = 3 -- stop at Projects folder
UNION ALL
SELECT tn.ID AS nodeid,
tn.Name,
tn.Type,
tn.ParentID,
PCTE.LEVEL + 1 AS LEVEL,
CAST(PCTE.treepath + ' > ' + CAST(tn.Name AS VARCHAR(1024)) AS VARCHAR(1024)) AS treepath
FROM TreeNode As tn
INNER JOIN treecte As PCTE ON PCTE.nodeid = tn.ParentID
WHERE tn.Type in ('Project', 'ProjectFolder', 'ProjectsFolder', 'Experiment', 'ExperimentPlate')
)
SELECT nodeid as id, Name as folder_name, treepath as path
FROM treecte
where Name like ?
and Type = 'Experiment'
ORDER BY treepath
"""
sql_path_to_experiments_via_nodeids = """
-- FIND PATH TO EXPERIMENTS GIVEN NODE IDS
WITH treecte(nodeid, Name, Type, ParentID, LEVEL, treepath)
AS (SELECT ID AS nodeid,
Name,
Type,
ParentID,
0 AS LEVEL,
CAST(Name AS VARCHAR(1024)) AS treepath
FROM TreeNode
WHERE ParentID = 3 -- stop at Projects folder
UNION ALL
SELECT tn.ID AS nodeid,
tn.Name,
tn.Type,
tn.ParentID,
PCTE.LEVEL + 1 AS LEVEL,
CAST(PCTE.treepath + ' > ' + CAST(tn.Name AS VARCHAR(1024)) AS VARCHAR(1024)) AS treepath
FROM TreeNode As tn
INNER JOIN treecte As PCTE ON PCTE.nodeid = tn.ParentID
WHERE tn.Type in ('Project', 'ProjectFolder', 'ProjectsFolder', 'Experiment', 'ExperimentPlate')
)
SELECT nodeid as id, Name as folder_name, treepath as path
FROM treecte
where nodeid in ({query_placeholders})
and Type = 'Experiment'
ORDER BY treepath
"""
sql_plates_from_experiment_nodes = """
select TreeNode.ParentID as parent_id,
TreeNode.ID as id,
Plate.ID as plate_id,
Plate.Barcode as barcode,
CONCAT(Plate.PlateNumber, ',', Plate.ID, ',', Plate.Barcode) as name
from Plate,
TreeNode
where Plate.TreeNodeID = TreeNode.ID
and TreeNode.ParentID in ({query_placeholders})
order by plate_id ASC
"""
sql_nodes_from_matching_plateid = """
select TreeNode.ID as id,
Plate.ID as plate_id,
Plate.Barcode as barcode,
CONCAT(Plate.PlateNumber, ',', Plate.ID, ',', Plate.Barcode) as name
from TreeNode,
Plate
where Plate.ID like ?
and Plate.TreeNodeID = TreeNode.ID
"""
sql_nodes_from_matching_barcode = """
select TreeNode.ID as id,
Plate.ID as plate_id,
Plate.Barcode as barcode,
CONCAT(Plate.PlateNumber, ',', Plate.ID, ',', Plate.Barcode) as name
from TreeNode,
Plate
where Plate.Barcode like ?
and Plate.TreeNodeID = TreeNode.ID
"""
sql_plates_from_matching_protein = """
WITH treecte(nodeid, Name, Type, ParentID, LEVEL, treepath)
AS (SELECT ID AS nodeid,
Name,
Type,
ParentID,
0 AS LEVEL,
CAST(Name AS VARCHAR(1024)) AS treepath
FROM TreeNode
WHERE ParentID = 3 -- stop at Projects folder
UNION ALL
SELECT tn.ID AS nodeid,
tn.Name,
tn.Type,
tn.ParentID,
PCTE.LEVEL + 1 AS LEVEL,
CAST(PCTE.treepath + ' > ' + CAST(tn.Name AS VARCHAR(1024)) AS VARCHAR(1024)) AS treepath
FROM TreeNode As tn
INNER JOIN treecte As PCTE ON PCTE.nodeid = tn.ParentID
WHERE tn.Type in ('Project', 'ProjectsFolder', 'Experiment', 'ExperimentPlate')
),
protein(protein_id, protein_name) as (select ProteinFormulation.ID,
TreeNode.name
from ProteinFormulation,
TreeNode
where TreeNode.name like ?
and ProteinFormulation.TreeNodeID = TreeNode.ID)
select distinct Plate.ID as plate_id,
TreeNode.ID as id,
protein.protein_name as folder_name,
treecte.treepath as path,
Plate.Barcode as barcode,
CONCAT(Plate.PlateNumber, ',', Plate.ID, ',', Plate.Barcode) as name
from protein,
treecte,
TreeNode,
Plate,
Experiment,
ProteinLayer PL,
ProteinLayerProteinFormulation PLPF,
ProteinFormulation PF
where protein.protein_id = PF.ID
and PF.ID = PLPF.ProteinFormulationID
and PL.ID = PLPF.ProteinLayerID
and Experiment.ID = PL.ExperimentID
and Experiment.ID = Plate.ExperimentID
and TreeNode.ID = Plate.TreeNodeID
and treecte.nodeid = TreeNode.ParentID
"""
sql_image_prefix = """
select s.url_prefix prefix
from sources s
where s.id = %s
"""
sql_other_images = """
SELECT Plate.Barcode AS barcode,
FORMAT(ImagingTask.DateImaged, 'MM/dd/yyyy') AS date_imaged,
ImagingTask.DateImaged AS date_imaged_orig,
WellDrop.DropNumber AS drop_num,
Plate.ID AS plate_id,
Plate.PlateNumber AS plate_num,
CONCAT(?, '/', Plate.ID % 1000, '/plateID_', Plate.ID, '/batchID_', ImageBatch.ID, '/wellNum_',
Well.WellNumber, '/profileID_', CaptureProfile.ID, '/d', WellDrop.DropNumber,
'_r', Region.ID, '_', ImageType.ShortName, CASE WHEN CaptureProfile.Name LIKE '%UV%' THEN '.png' ELSE '.jpg' END) AS url,
Well.WellNumber AS well_num,
CaptureProfile.Name AS image_type
FROM WellDrop
INNER JOIN Well ON Well.ID = WellDrop.WellID
INNER JOIN Plate ON Plate.ID = Well.PlateID
INNER JOIN Region ON Region.WellDropID = WellDrop.ID
INNER JOIN CaptureResult ON CaptureResult.REGIONID = REGION.ID
INNER JOIN CaptureProfileVersion ON CaptureProfileVersion.ID = CaptureResult.CaptureProfileVersionID
INNER JOIN CaptureProfile ON CaptureProfile.ID = CaptureProfileVersion.CaptureProfileID
INNER JOIN Image ON Image.CaptureResultID = CaptureResult.ID
INNER JOIN ImageType ON ImageType.ID = Image.ImageTypeID
INNER JOIN ImageStore on ImageStore.ID = Image.ImageStoreID
INNER JOIN ImageBatch on ImageBatch.ID = CAPTURERESULT.ImageBatchID
INNER JOIN ImagingTask on ImagingTask.ID = ImageBatch.ImagingTaskID
WHERE ImageType.ShortName = 'ef'
AND Plate.ID = ?
AND Well.WellNumber = ?
AND WellDrop.DropNumber = ?
ORDER BY date_imaged_orig DESC
"""
sql_image_info = """
SELECT CaptureProfile.Name AS image_type,
Image.PixelSize AS pixel_size
FROM Region
INNER JOIN CaptureResult ON CaptureResult.RegionID = Region.ID
INNER JOIN CaptureProfileVersion ON CaptureProfileVersion.ID = CaptureResult.CaptureProfileVersionID
INNER JOIN CaptureProfile ON CaptureProfile.ID = CaptureProfileVersion.CaptureProfileID
INNER JOIN Image ON Image.CaptureResultID = CaptureResult.ID
INNER JOIN ImageType ON ImageType.ID = Image.ImageTypeID
INNER JOIN ImageStore on ImageStore.ID = Image.ImageStoreID
INNER JOIN ImageBatch on ImageBatch.ID = CaptureResult.ImageBatchID
INNER JOIN ImagingTask on ImagingTask.ID = ImageBatch.ImagingTaskID
WHERE ImageType.ShortName = 'ef'
AND Region.ID = ?
AND ImageBatch.ID = ?
AND CaptureProfile.ID = ?
"""
| 43.226937
| 148
| 0.524564
|
44bb75e33eb528db7e8707076619f026cf5ecb82
| 772
|
py
|
Python
|
setup.py
|
nschneid/DepEdit
|
8840bd843ba6ccc308483bb05de92f60ecdce06c
|
[
"Apache-2.0"
] | 1
|
2021-05-13T18:16:20.000Z
|
2021-05-13T18:16:20.000Z
|
setup.py
|
nschneid/DepEdit
|
8840bd843ba6ccc308483bb05de92f60ecdce06c
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
nschneid/DepEdit
|
8840bd843ba6ccc308483bb05de92f60ecdce06c
|
[
"Apache-2.0"
] | null | null | null |
from distutils.core import setup
setup(
name = 'depedit',
packages = ['depedit'],
version = '2.0.0',
description = 'A simple configurable tool for manipulating dependency trees',
author = 'Amir Zeldes',
author_email = 'amir.zeldes@georgetown.edu',
url = 'https://github.com/amir-zeldes/depedit',
license='Apache License, Version 2.0',
download_url = 'https://github.com/amir-zeldes/depedit/releases/tag/2.0.0',
keywords = ['NLP', 'parsing', 'syntax', 'dependencies', 'dependency', 'tree', 'treebank', 'conll', 'conllu', 'ud'],
classifiers = ['Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent'],
)
| 40.631579
| 117
| 0.686528
|
3fea3f9b4c9767d0f85e899263a675d9b22b9f38
| 691
|
py
|
Python
|
pythonUtils/ExploreDA/Statistics/cat_statistics.py
|
tgquintela/pythonUtils
|
6f2e5ba3be67a48d3cd5cf72dcabfae04cfa7afe
|
[
"MIT"
] | 1
|
2015-07-21T05:15:11.000Z
|
2015-07-21T05:15:11.000Z
|
pythonUtils/ExploreDA/Statistics/cat_statistics.py
|
tgquintela/pythonUtils
|
6f2e5ba3be67a48d3cd5cf72dcabfae04cfa7afe
|
[
"MIT"
] | null | null | null |
pythonUtils/ExploreDA/Statistics/cat_statistics.py
|
tgquintela/pythonUtils
|
6f2e5ba3be67a48d3cd5cf72dcabfae04cfa7afe
|
[
"MIT"
] | null | null | null |
"""
Categorical variable statistics
-------------------------------
Module which groups all the functions needed to compute the statistics and the
description of the categorical variables.
"""
## Categorical count
def cat_count(df, variable):
"""The catagory counts.
Parameters
----------
df: pd.DataFrame
the data in dataframe form.
variable: str
the variable of the database we want to study.
Returns
-------
    counts: pd.Series
the counts of the possible categories in the categorical variable.
"""
if type(variable) == list:
variable = variable[0]
counts = df[variable].value_counts()
return counts
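# A short usage sketch for cat_count (not part of the original module; the DataFrame
# below is illustrative).
if __name__ == "__main__":
    import pandas as pd
    example = pd.DataFrame({"color": ["red", "blue", "red", "green", "red"]})
    print(cat_count(example, "color"))  # value counts per category, e.g. red: 3, blue: 1, green: 1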
| 21.59375
| 78
| 0.628075
|
062767db1c1672c95ffa361a4993646db617faa4
| 3,169
|
py
|
Python
|
test/vanilla/legacy/Expected/AcceptanceTests/Report/report/_auto_rest_report_service.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 35
|
2018-04-03T12:15:53.000Z
|
2022-03-11T14:03:34.000Z
|
test/vanilla/legacy/Expected/AcceptanceTests/Report/report/_auto_rest_report_service.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 652
|
2017-08-28T22:44:41.000Z
|
2022-03-31T21:20:31.000Z
|
test/vanilla/legacy/Expected/AcceptanceTests/Report/report/_auto_rest_report_service.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 29
|
2017-08-28T20:57:01.000Z
|
2022-03-11T14:03:38.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import AutoRestReportServiceConfiguration
from .operations import AutoRestReportServiceOperationsMixin
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.rest import HttpRequest, HttpResponse
class AutoRestReportService(AutoRestReportServiceOperationsMixin):
"""Test Infrastructure for AutoRest.
:param base_url: Service URL. Default value is 'http://localhost:3000'.
:type base_url: str
"""
def __init__(
self,
base_url="http://localhost:3000", # type: str
**kwargs # type: Any
):
# type: (...) -> None
self._config = AutoRestReportServiceConfiguration(**kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
def _send_request(
self,
request, # type: HttpRequest
**kwargs # type: Any
):
# type: (...) -> HttpResponse
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AutoRestReportService
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
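# Illustrative usage of the client defined above (not part of the generated file). The
# request path "/report" is hypothetical; base_url matches the default documented above.
if __name__ == "__main__":
    from azure.core.rest import HttpRequest

    with AutoRestReportService(base_url="http://localhost:3000") as client:
        sample_request = HttpRequest("GET", "/report")
        sample_response = client._send_request(sample_request)
        print(sample_response.status_code)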
| 36.848837
| 99
| 0.649732
|
be42a1d3452a9f00e1774ee46a2af222343872cd
| 635
|
py
|
Python
|
Python/Logic_2/close_far.py
|
RCoon/CodingBat
|
c5004c03e668c62751dc7f13154c79e25ea34339
|
[
"MIT"
] | 1
|
2015-11-06T02:26:50.000Z
|
2015-11-06T02:26:50.000Z
|
Python/Logic_2/close_far.py
|
RCoon/CodingBat
|
c5004c03e668c62751dc7f13154c79e25ea34339
|
[
"MIT"
] | null | null | null |
Python/Logic_2/close_far.py
|
RCoon/CodingBat
|
c5004c03e668c62751dc7f13154c79e25ea34339
|
[
"MIT"
] | null | null | null |
# Given three ints, a b c, return True if one of b or c is "close" (differing
# from a by at most 1), while the other is "far", differing from both other
# values by 2 or more. Note: abs(num) computes the absolute value of a number.
# close_far(1, 2, 10) --> True
# close_far(1, 2, 3) --> False
# close_far(4, 1, 3) --> True
def close_far(a, b, c):
a_b_diff = abs(a - b)
a_c_diff = abs(a - c)
b_c_diff = abs(b - c)
return (a_b_diff <= 1 and (b_c_diff >= 2 and a_c_diff >= 2) or
a_c_diff <= 1 and (b_c_diff >= 2 and a_b_diff >= 2))
print(close_far(1, 2, 10))
print(close_far(1, 2, 3))
print(close_far(4, 1, 3))
| 31.75
| 79
| 0.623622
|
68ddcbddd5c9dac3329e011e8d640f05e61b478a
| 16,869
|
py
|
Python
|
tests/test_anyio.py
|
lagerdata/asks
|
44dc355db729384f03c332fdfbc5ab7cf2af0b71
|
[
"MIT"
] | null | null | null |
tests/test_anyio.py
|
lagerdata/asks
|
44dc355db729384f03c332fdfbc5ab7cf2af0b71
|
[
"MIT"
] | null | null | null |
tests/test_anyio.py
|
lagerdata/asks
|
44dc355db729384f03c332fdfbc5ab7cf2af0b71
|
[
"MIT"
] | null | null | null |
# pylint: disable=wrong-import-position
import ssl
from os import path
from functools import partial
from pathlib import Path
import pytest
from anyio import create_task_group, aopen
import curio
from overly import (
Server,
ssl_socket_wrapper,
default_ssl_cert,
send_200,
send_303,
send_400,
send_500,
delay,
send_request_as_json,
accept_cookies_and_respond,
send_gzip,
send_deflate,
send_chunked,
send_200_blank_headers,
finish,
HttpMethods,
)
import asks
from asks.errors import TooManyRedirects, BadStatus, RequestTimeout
_TEST_LOC = ("localhost", 25001)
_SSL_CONTEXT = ssl.create_default_context(cadata=default_ssl_cert)
def curio_run(func):
def func_wrapper(*args, **kwargs):
kernel = curio.Kernel()
kernel.run(func(*args, **kwargs))
kernel.run(shutdown=True)
return func_wrapper
@Server(_TEST_LOC, steps=[send_200, finish])
@curio_run
async def test_http_get(server):
r = await asks.get(server.http_test_url)
assert r.status_code == 200
# GET tests
@Server(_TEST_LOC, steps=[send_200, finish], socket_wrapper=ssl_socket_wrapper)
@curio_run
async def test_https_get(server):
# If we use ssl_context= to trust the CA, then we can successfully do a
# GET over https.
r = await asks.get(server.https_test_url, ssl_context=_SSL_CONTEXT)
assert r.status_code == 200
@Server(_TEST_LOC, steps=[send_200, finish], socket_wrapper=ssl_socket_wrapper)
@curio_run
async def test_https_get_checks_cert(server):
try:
expected_error = ssl.SSLCertVerificationError
except AttributeError:
# If we're running in Python <3.7, we won't have the specific error
# that will be raised, but we can expect it to raise an SSLError
# nonetheless
expected_error = ssl.SSLError
# The server's certificate isn't signed by any real CA. By default, asks
# should notice that, and raise an error.
with pytest.raises(expected_error):
await asks.get(server.https_test_url)
# @curio_run
# async def test_bad_www_and_schema_get():
# r = await asks.get('http://reddit.com')
# assert r.status_code == 200
@Server(_TEST_LOC, steps=[send_400, finish])
@curio_run
async def test_http_get_client_error(server):
r = await asks.get(server.http_test_url)
with pytest.raises(BadStatus) as excinfo:
r.raise_for_status()
assert excinfo.match("400 Client Error: BAD REQUEST")
assert excinfo.value.status_code == 400
@Server(_TEST_LOC, steps=[send_500, finish])
@curio_run
async def test_http_get_server_error(server):
r = await asks.get(server.http_test_url)
with pytest.raises(BadStatus) as excinfo:
r.raise_for_status()
assert excinfo.match("500 Server Error: INTERNAL SERVER ERROR")
assert excinfo.value.status_code == 500
# Redirect tests
@Server(
_TEST_LOC,
max_requests=4,
steps=[
[(HttpMethods.GET, "/redirect_1"), send_303, finish],
[(HttpMethods.GET, "/"), send_200, finish],
[(HttpMethods.GET, "/redirect_1"), send_303, finish],
[(HttpMethods.GET, "/"), send_200, finish],
],
ordered_steps=True,
)
@curio_run
async def test_http_redirect(server):
r = await asks.get(server.http_test_url + "/redirect_1")
assert len(r.history) == 1
# make sure history doesn't persist across responses
r.history.append("not a response obj")
r = await asks.get(server.http_test_url + "/redirect_1")
assert len(r.history) == 1
@Server(
_TEST_LOC,
max_requests=3,
steps=[
[
(HttpMethods.GET, "/redirect_max"),
partial(send_303, headers=[("location", "redirect_max1")]),
finish,
],
[
(HttpMethods.GET, "/redirect_max1"),
partial(send_303, headers=[("location", "redirect_max")]),
finish,
],
],
)
@curio_run
async def test_http_max_redirect_error(server):
with pytest.raises(TooManyRedirects):
await asks.get(server.http_test_url + "/redirect_max", max_redirects=1)
@Server(
_TEST_LOC,
max_requests=2,
steps=[
[
(HttpMethods.GET, "/path/redirect"),
partial(send_303, headers=[("location", "../foo/bar")]),
finish,
],
[(HttpMethods.GET, "/foo/bar"), send_200, finish],
],
)
@curio_run
async def test_redirect_relative_url(server):
r = await asks.get(server.http_test_url + "/path/redirect", max_redirects=1)
assert len(r.history) == 1
assert r.url == "http://{0}:{1}/foo/bar".format(*_TEST_LOC)
@Server(
_TEST_LOC,
max_requests=2,
steps=[
[
(HttpMethods.GET, "/redirect_once"),
partial(send_303, headers=[("location", "/")]),
finish,
],
[(HttpMethods.GET, "/"), send_200, finish],
],
)
@curio_run
async def test_http_under_max_redirect(server):
r = await asks.get(server.http_test_url + "/redirect_once", max_redirects=2)
assert r.status_code == 200
@Server(
_TEST_LOC,
max_requests=1,
steps=[
[
(HttpMethods.GET, "/redirect_once"),
partial(send_303, headers=[("location", "/")]),
finish,
],
],
)
@curio_run
async def test_dont_follow_redirects(server):
r = await asks.get(server.http_test_url + "/redirect_once", follow_redirects=False)
assert r.status_code == 303
assert r.headers["location"] == "/"
# Timeout tests
@Server(_TEST_LOC, steps=[delay(2), send_200, finish])
@curio_run
async def test_http_timeout_error(server):
with pytest.raises(RequestTimeout):
await asks.get(server.http_test_url, timeout=1)
@Server(_TEST_LOC, steps=[send_200, finish])
@curio_run
async def test_http_timeout(server):
r = await asks.get(server.http_test_url, timeout=10)
assert r.status_code == 200
# Param set test
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_param_dict_set(server):
r = await asks.get(server.http_test_url, params={"cheese": "the best"})
j = r.json()
assert next(v == "the best" for k, v in j["params"] if k == "cheese")
# Data set test
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_data_dict_set(server):
r = await asks.post(server.http_test_url, data={"cheese": "please bby"})
j = r.json()
assert next(v == "please bby" for k, v in j["form"] if k == "cheese")
# Cookie send test
@Server(_TEST_LOC, steps=[accept_cookies_and_respond, finish])
@curio_run
async def test_cookie_dict_send(server):
cookies = {"Test-Cookie": "Test Cookie Value", "koooookie": "pie"}
r = await asks.get(server.http_test_url, cookies=cookies)
for cookie in r.cookies:
assert cookie.name in cookies
if " " in cookie.value:
assert cookie.value == '"' + cookies[cookie.name] + '"'
else:
assert cookie.value == cookies[cookie.name]
# Custom headers test
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_header_set(server):
r = await asks.get(
server.http_test_url, headers={"Asks-Header": "Test Header Value"}
)
j = r.json()
assert any(k == "asks-header" for k, _ in j["headers"])
assert "cOntenT-tYPe" in r.headers
# File send test
TEST_DIR = path.dirname(path.abspath(__file__))
TEST_FILE1 = path.join(TEST_DIR, "test_file1.txt")
TEST_FILE2 = path.join(TEST_DIR, "test_file2")
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_file_send_single(server):
r = await asks.post(server.http_test_url, files={"file_1": TEST_FILE1})
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
file_data = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data["file"] == "Compooper"
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_file_send_double(server):
r = await asks.post(
server.http_test_url, files={"file_1": TEST_FILE1, "file_2": TEST_FILE2}
)
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
assert any(file_data["name"] == "file_2" for file_data in j["files"])
file_data_1 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
file_data_2 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_2"
)
assert file_data_1["file"] == "Compooper"
assert file_data_2["file"] == "My slug <3"
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_file_send_file_and_form_data(server):
r = await asks.post(
server.http_test_url,
files={"file_1": TEST_FILE1, "data_1": "watwatwatwat=yesyesyes"},
)
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
assert any(form_data["name"] == "data_1" for form_data in j["forms"])
file_data_1 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data_1["file"] == "Compooper"
form_data_1 = next(
form_data for form_data in j["forms"] if form_data["name"] == "data_1"
)
assert form_data_1["form_data"] == "watwatwatwat=yesyesyes"
# File send test new multipart API
TEST_DIR = path.dirname(path.abspath(__file__))
TEST_FILE1 = path.join(TEST_DIR, "test_file1.txt")
TEST_FILE2 = path.join(TEST_DIR, "test_file2")
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_multipart_send_single(server):
r = await asks.post(server.http_test_url, multipart={"file_1": Path(TEST_FILE1)})
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
file_data = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data["file"] == "Compooper"
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_multipart_send_single_already_open(server):
with open(TEST_FILE1, "rb") as f:
r = await asks.post(server.http_test_url, multipart={"file_1": f})
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
file_data = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data["file"] == "Compooper"
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_multipart_send_single_already_open_async(server):
async with await aopen(TEST_FILE1, "rb") as f:
r = await asks.post(server.http_test_url, multipart={"file_1": f})
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
file_data = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data["file"] == "Compooper"
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_multipart_send_raw_bytes(server):
r = await asks.post(
server.http_test_url,
multipart={
"file_1": asks.multipart.MultipartData(
b"Compooper", basename="in_memory.txt",
)
},
)
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
file_data = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data["file"] == "Compooper"
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_multipart_send_double(server):
r = await asks.post(
server.http_test_url,
multipart={"file_1": Path(TEST_FILE1), "file_2": Path(TEST_FILE2)},
)
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
assert any(file_data["name"] == "file_2" for file_data in j["files"])
file_data_1 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
file_data_2 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_2"
)
assert file_data_1["file"] == "Compooper"
assert file_data_2["file"] == "My slug <3"
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_multipart_send_file_and_form_data(server):
r = await asks.post(
server.http_test_url,
multipart={"file_1": Path(TEST_FILE1), "data_1": "watwatwatwat=yesyesyes"},
)
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
assert any(form_data["name"] == "data_1" for form_data in j["forms"])
file_data_1 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data_1["file"] == "Compooper"
form_data_1 = next(
form_data for form_data in j["forms"] if form_data["name"] == "data_1"
)
assert form_data_1["form_data"] == "watwatwatwat=yesyesyes"
# JSON send test
@Server(_TEST_LOC, steps=[send_request_as_json, finish])
@curio_run
async def test_json_send(server):
r = await asks.post(
server.http_test_url, json={"key_1": True, "key_2": "cheesestring"}
)
j = r.json()
json_1 = next(iter(j["json"]))
assert json_1["json"]["key_1"] is True
assert json_1["json"]["key_2"] == "cheesestring"
# Test decompression
@Server(_TEST_LOC, steps=[partial(send_gzip, data="wolowolowolo"), finish])
@curio_run
async def test_gzip(server):
r = await asks.get(server.http_test_url)
assert r.text == "wolowolowolo"
@Server(_TEST_LOC, steps=[partial(send_deflate, data="wolowolowolo"), finish])
@curio_run
async def test_deflate(server):
r = await asks.get(server.http_test_url)
assert r.text == "wolowolowolo"
# Test chunks and streaming
@Server(_TEST_LOC, steps=[partial(send_chunked, data=["ham "] * 10), finish])
@curio_run
async def test_chunked(server):
r = await asks.get(server.http_test_url)
assert r.text == "ham ham ham ham ham ham ham ham ham ham "
@Server(_TEST_LOC, steps=[partial(send_chunked, data=["ham "] * 10), finish])
@curio_run
async def test_stream(server):
data = b""
r = await asks.get(server.http_test_url, stream=True)
async for chunk in r.body:
data += chunk
assert data == b"ham ham ham ham ham ham ham ham ham ham "
# Test callback
@Server(_TEST_LOC, steps=[partial(send_chunked, data=["ham "] * 10), finish])
@curio_run
async def test_callback(server):
async def callback_example(chunk):
nonlocal callback_data
callback_data += chunk
callback_data = b""
await asks.get(server.http_test_url, callback=callback_example)
assert callback_data == b"ham ham ham ham ham ham ham ham ham ham "
# Test connection close without content-length and transfer-encoding
@Server(
_TEST_LOC,
steps=[partial(send_200_blank_headers, headers=[("connection", "close")]), finish],
)
@curio_run
async def test_connection_close_no_content_len(server):
r = await asks.get(server.http_test_url)
assert r.text == "200"
# Session Tests
# =============
# Test Session with two pooled connections on ten get requests.
@Server(
_TEST_LOC,
steps=[partial(send_200_blank_headers, headers=[("connection", "close")]), finish],
max_requests=10,
)
@curio_run
async def test_session_smallpool(server):
async def worker(s):
r = await s.get(path="/get")
assert r.status_code == 200
s = asks.Session(server.http_test_url, connections=2)
async with create_task_group() as g:
for _ in range(10):
await g.spawn(worker, s)
# Test stateful Session
# TODO check the "" quoting of cookies here (probably in overly)
@Server(_TEST_LOC, steps=[accept_cookies_and_respond, finish])
@curio_run
async def test_session_stateful(server):
s = asks.Session(server.http_test_url, persist_cookies=True)
await s.get(cookies={"Test-Cookie": "Test Cookie Value"})
assert ":".join(str(x) for x in _TEST_LOC) in s._cookie_tracker.domain_dict.keys()
assert (
s._cookie_tracker.domain_dict[":".join(str(x) for x in _TEST_LOC)][0].value
== '"Test Cookie Value"'
)
# Test session instantiates outside event loop
def test_instantiate_session_outside_of_event_loop():
try:
asks.Session()
except RuntimeError:
pytest.fail("Could not instantiate Session outside of event loop")
@curio_run
async def test_session_unknown_kwargs():
with pytest.raises(TypeError, match="request\(\) got .*"):
session = asks.Session("https://httpbin.org/get")
await session.request("GET", ko=7, foo=0, bar=3, shite=3)
pytest.fail("Passing unknown kwargs does not raise TypeError")
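# A minimal sketch (not part of the original test module) showing the curio_run wrapper
# above applied outside the overly Server fixtures; the URL is illustrative and a server
# must be listening there for the request to succeed.
@curio_run
async def fetch_and_check(url="http://localhost:25001/"):
    r = await asks.get(url)
    assert r.status_code == 200
# Calling fetch_and_check() spins up a fresh curio kernel via the wrapper, exactly as each
# decorated test above is executed.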
| 27.882645
| 87
| 0.668267
|
158b12cf09633b7b84df201608b2b0f1bfb658d7
| 2,669
|
py
|
Python
|
main.py
|
hacker1383/dow
|
376bd6ca6ae03ec88aba27dc4e83330b52d4aab1
|
[
"MIT"
] | null | null | null |
main.py
|
hacker1383/dow
|
376bd6ca6ae03ec88aba27dc4e83330b52d4aab1
|
[
"MIT"
] | null | null | null |
main.py
|
hacker1383/dow
|
376bd6ca6ae03ec88aba27dc4e83330b52d4aab1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import telebot
import re
from urllib import urlretrieve as dw
import sys
import os
#import color
reload(sys)
sys.setdefaultencoding("utf-8")
bot = telebot.TeleBot('266344988:AAHcpmC8kCgQuKjYzett08jdRaDJePIXDwM')
admin = 119296662
botusername = 'DOWNLOADERROBOT'
@bot.message_handler(func=lambda m: m.text)
def n(m):
text = m.text
id = m.from_user.id
print 'Text : \033[32m{}\nID : \033[31m{}'.format(text,id)
if re.match('^/(id|who)$',text):
bot.send_message(m.chat.id, m.from_user.id)
if re.match('^/(help|start)$',text):
bot.send_message(m.chat.id, """
1> /id
2> <code>send url png|jpg|zip</code>
3> #Soon
""",parse_mode='HTML')
if m.chat.type == 'private':
		if re.match('(ftp|http|https)://.*\.(png)$',text):
bot.send_message(m.chat.id, 'ok wait')
dw(text,'s.png')
			bot.send_photo(m.chat.id, open('s.png','rb'))
os.remove('s.png')
if re.match('(ftp|http|https)://.*\.(jpg)$',text):
bot.send_message(m.chat.id, 'ok wait')
dw(text,'s.jpg')
			bot.send_photo(m.chat.id, open('s.jpg','rb'))
os.remove('s.jpg')
if re.match('(ftp|http|https)://.*\.(zip)$',text):
bot.send_message(m.chat.id, 'ok wait')
dw(text,'file.zip')
			bot.send_document(m.chat.id, open('file.zip','rb'))
os.remove('file.zip')
if m.chat.type == 'group' or m.chat.type == 'supergroup':
if m.reply_to_message:
if m.reply_to_message.from_user.username == botusername:
if re.match('(ftp|http|https)://.*\.(png)$',text):
bot.send_message(m.chat.id, 'ok wait')
dw(text,'s.png')
					bot.send_photo(m.chat.id, open('s.png','rb'))
os.remove('s.png')
if re.match('(ftp|http|https)://.*\.(jpg)$',text): #
bot.send_message(m.chat.id, 'ok wait') # pic download File (Group by reply)
dw(text,'s.jpg') #
					bot.send_photo(m.chat.id, open('s.jpg','rb'))
os.remove('s.jpg')
print 'Remove jpg file'
if re.match('(ftp|http|https)://.*\.(zip)$',text):
bot.send_message(m.chat.id, 'ok wait') #
dw(text,'file.zip') # zip files
					bot.send_document(m.chat.id, open('file.zip','rb')) #
os.remove('file.zip')
print 'Remove zip file'
bot.polling(True)
| 39.25
| 108
| 0.499438
|
b30cd28cd2787611334e8b459c45978f4477d29d
| 3,294
|
py
|
Python
|
Load_Database.py
|
dl-stuff/dl-datamine
|
aae37710d2525aaa2b83f809e908be67f074c2d2
|
[
"MIT"
] | 3
|
2020-04-29T12:35:33.000Z
|
2022-03-22T20:08:22.000Z
|
Load_Database.py
|
dl-stuff/dl-datamine
|
aae37710d2525aaa2b83f809e908be67f074c2d2
|
[
"MIT"
] | 1
|
2020-10-23T00:08:35.000Z
|
2020-10-29T04:10:35.000Z
|
Load_Database.py
|
dl-stuff/dl-datamine
|
aae37710d2525aaa2b83f809e908be67f074c2d2
|
[
"MIT"
] | 4
|
2020-04-05T15:09:08.000Z
|
2020-10-21T15:08:34.000Z
|
import os
import argparse
from time import monotonic
import json
from loader.AssetExtractor import Extractor
from loader.Database import DBManager
from loader.Master import load_master, load_json
from loader.Actions import load_actions
from loader.Motion import load_motions
from loader.Aiscript import load_aiscript
from loader.UISkillDetail import load_ui_skill_detail
JP = "jp"
EN = "en"
CN = "cn"
MASTER = "master"
ACTIONS = "actions"
TEXT_LABEL = "TextLabel.json"
LABEL_PATTERNS_EN = {
r"^master$": "master",
r"^ui/skilldetail/skilldetail": "skilldetail",
}
LABEL_PATTERNS_CN = {
r"^master$": "master",
r"^ui/skilldetail/skilldetail": "skilldetail",
}
LABEL_PATTERNS_JP = {
r"^master$": "master",
r"^actions$": "actions",
r"^aiscript$": "aiscript",
r"^characters/motion": "motion",
r"characters/motion/animationclips$": "motion",
r"^dragon/motion": "motion",
r"^assets/_gluonresources/meshes/dragon": "motion",
r"^ui/skilldetail/skilldetail": "skilldetail",
}
LABEL_PATTERNS = {
JP: LABEL_PATTERNS_JP,
EN: LABEL_PATTERNS_EN,
CN: LABEL_PATTERNS_CN,
}
def extract_story_function_json(ex):
filenames = ("function", "function_namelist_notedit")
storynames = {}
ex.download_and_extract_by_pattern({"jp": {f"^story/{fn}": None for fn in filenames}})
for fn in filenames:
ex_path = os.path.join(ex.ex_dir, "jp", "story", f"{fn}.json")
with open(ex_path) as func:
data = json.load(func)["functions"][0]["variables"]
for k, v in zip(data["entriesKey"], data["entriesValue"]):
if not k:
continue
storynames[k] = v
with open("./out/_storynames.json", "w") as out:
json.dump(
storynames,
out,
indent=4,
ensure_ascii=False,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Import data to database.")
parser.add_argument("--do_prep", help="Download and extract db related assets", action="store_true")
parser.add_argument("-m_hash", help="Use", action="store_true")
parser.add_argument("-o", type=str, help="output file", default="dl.sqlite")
args = parser.parse_args()
start = monotonic()
dl_dir = "./_dl_sim"
in_dir = "./_ex_sim"
if args.do_prep:
ex = Extractor(dl_dir=dl_dir, ex_dir=in_dir, ex_img_dir=None, overwrite=True)
if not os.path.isdir(in_dir):
ex.download_and_extract_by_pattern(LABEL_PATTERNS)
else:
ex.download_and_extract_by_pattern_diff(LABEL_PATTERNS)
load_aiscript(os.path.join(in_dir, "jp", "aiscript"))
# extract_story_function_json(ex)
db = DBManager(args.o)
load_master(db, os.path.join(in_dir, EN, MASTER))
load_json(db, os.path.join(in_dir, JP, MASTER, TEXT_LABEL), "TextLabelJP")
load_json(db, os.path.join(in_dir, CN, MASTER, TEXT_LABEL), "TextLabelCN")
schema_map = load_actions(db, os.path.join(in_dir, JP, ACTIONS))
os.makedirs("out", exist_ok=True)
with open("./out/_action_schema.json", "w") as f:
json.dump(schema_map, f, indent=4, sort_keys=True)
load_motions(db, os.path.join(in_dir, JP))
load_ui_skill_detail(db, in_dir)
print(f"total: {monotonic()-start:.4f}s")
| 33.612245
| 104
| 0.664845
|
a828bba1626a76ff9ad65be4147aebccf2d10e8d
| 1,152
|
py
|
Python
|
macros_ffn/01_cube_process_tiffstacks.py
|
jeffkinnison/HappyNeuron
|
66ad1c3dc8fc89b518fe74e8318c5ba6d79b8f0a
|
[
"BSD-Source-Code"
] | 2
|
2020-01-31T12:21:57.000Z
|
2021-09-16T09:20:10.000Z
|
macros_ffn/01_cube_process_tiffstacks.py
|
jeffkinnison/HappyNeuron
|
66ad1c3dc8fc89b518fe74e8318c5ba6d79b8f0a
|
[
"BSD-Source-Code"
] | null | null | null |
macros_ffn/01_cube_process_tiffstacks.py
|
jeffkinnison/HappyNeuron
|
66ad1c3dc8fc89b518fe74e8318c5ba6d79b8f0a
|
[
"BSD-Source-Code"
] | 1
|
2021-09-16T09:33:12.000Z
|
2021-09-16T09:33:12.000Z
|
import os
import glob
import dxchange
import numpy as np
import h5py
print ("Working Dir")
print (os.getcwd())
cube = dxchange.read_tiff_stack('cube02/z01.tif',np.arange(1,50)) #raw data 8bit, change 50 to the # of sections
labels = dxchange.read_tiff_stack('labels02/l01.tif',np.arange(1,50)) #label "ground truth" uint 8 or 32
print ('Cube Properties!')
print (cube.dtype)
print (cube.shape)
print ('Mean : '+str(cube.mean()))
print ('Std : '+str(cube.std()))
print ('Labels Properties!')
print (labels.dtype)
print (labels.shape)
print ('Ids Properties!')
ids = np.unique(labels,return_counts=1)
print (ids)
#raf added here to pad256
# cube = np.pad(cube,((115,116),(0,0),(0,0)),'reflect')
# labels = np.pad(labels,((115,116),(0,0),(0,0)),'reflect')
# print ('Cube Properties!')
# print (cube.dtype)
# print (cube.shape)
# print (cube.mean(),cube.std())
# print ('Labels Properties!')
# print (labels.dtype)
# print (labels.shape)
# print (labels.mean())
h5file = h5py.File('training_data_02.h5', 'w')
h5file.create_dataset('image',data=cube)
h5file.create_dataset('labels',data=labels)
h5file.close()
print ("Finished!! Goodbye!!")
| 22.153846
| 111
| 0.688368
|
ee228df6ab55d3d0b7da649aaaed0424561ee4c3
| 660
|
py
|
Python
|
check_in_solution.py
|
k15siyuen/astr-119-hw-1
|
ace1a3036fa47c91623ed093d5a4f4d6a3dc2d51
|
[
"MIT"
] | null | null | null |
check_in_solution.py
|
k15siyuen/astr-119-hw-1
|
ace1a3036fa47c91623ed093d5a4f4d6a3dc2d51
|
[
"MIT"
] | 1
|
2018-10-09T17:31:38.000Z
|
2018-10-18T16:48:08.000Z
|
check_in_solution.py
|
k15siyuen/astr-119-hw-1
|
ace1a3036fa47c91623ed093d5a4f4d6a3dc2d51
|
[
"MIT"
] | 1
|
2018-10-18T01:55:19.000Z
|
2018-10-18T01:55:19.000Z
|
#include the Numpy Library
import numpy as np
#define the main() function
def main():
i = 0 #declare i, initialize to 0
x = 119.0 #declare a float x which can be any number depending on precision of computer
for i in range(120): #loop i from 0 to 119 inclusive
		if((i%2)==0): #if i is even
x += 3 #add 3 to x -- x = x+3
else: #if i is odd
x -= 5 #subtract 5 from x
s = "%3.2e" % x #makes a string s containing the value of x in sci notation
#with 2 decimal places showing
print(s) #print s to the screen
#now the rest of the program continues
if __name__ == "__main__": #call main
main() #run the main function
| 26.4
| 88
| 0.64697
|