| hexsha (string, 40) | size (int64, 1-1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3-239) | max_stars_repo_name (string, 5-130) | max_stars_repo_head_hexsha (string, 40-78) | max_stars_repo_licenses (list, 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 3-239) | max_issues_repo_name (string, 5-130) | max_issues_repo_head_hexsha (string, 40-78) | max_issues_repo_licenses (list, 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 3-239) | max_forks_repo_name (string, 5-130) | max_forks_repo_head_hexsha (string, 40-78) | max_forks_repo_licenses (list, 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 1-1.03M) | avg_line_length (float64, 1-958k) | max_line_length (int64, 1-1.03M) | alphanum_fraction (float64, 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a05dba10eba0cd46002ae358afad66ab3fbd5d0
| 16,627
|
py
|
Python
|
ceasiompy/BalanceUnconventional/func/AinFunc/getdatafromcpacs.py
|
lverdier1/CEASIOMpy
|
178d44b26ba1d9249928823c3896f7ad85d19de8
|
[
"Apache-2.0"
] | null | null | null |
ceasiompy/BalanceUnconventional/func/AinFunc/getdatafromcpacs.py
|
lverdier1/CEASIOMpy
|
178d44b26ba1d9249928823c3896f7ad85d19de8
|
[
"Apache-2.0"
] | null | null | null |
ceasiompy/BalanceUnconventional/func/AinFunc/getdatafromcpacs.py
|
lverdier1/CEASIOMpy
|
178d44b26ba1d9249928823c3896f7ad85d19de8
|
[
"Apache-2.0"
] | null | null | null |
"""
CEASIOMpy: Conceptual Aircraft Design Software
Developed for CFS ENGINEERING, 1015 Lausanne, Switzerland
This programm will read the xml file created by the weight module or
the xml file in the cpacs file formatinside the ToolInput folder.
The cpacs file Must contain the:
* maximum_take_off_mass --In.: Maximum take off mass
* mass_fuel_max --In.: Maximum fuel mass
* mass_fuel_maxpass --In.: Maximum fuel with max passengers
* operating_empty_mass --In.: Operating empty mass
* mass_payload --In.: Payload mass
The cpacs file Should also contain:
* WING_MOUNTED --In.: True if the engine are placed on the rear of the
aircraft.
| Works with Python 2.7
| Author : Stefano Piccini
| Date of creation: 2018-12-05
| Last modifiction: 2019-08-29 (AJ)
"""
#=============================================================================
# IMPORTS
#=============================================================================
import numpy as np
from ceasiompy.utils.ceasiomlogger import get_logger
from ceasiompy.utils.cpacsfunctions import open_tixi,open_tigl, close_tixi, \
create_branch
log = get_logger(__file__.split('.')[0])
#=============================================================================
# CLASSES
#=============================================================================
"""All classes are defined inside the classes folder and into the
InputClasses/Uconventional folder"""
#=============================================================================
# FUNCTIONS
#=============================================================================
def get_user_fuel(fus_nb, ui, cpacs_in):
""" Function to extract from the xml file the required input data,
the code will use the default value when they are missing.
INPUT
(int) fus_nb --Arg.: Number of fuselage.
(class) ui --Arg.: UserInputs class.
##======= Classes are defined in the InputClasses folder =======##
(char) cpacs_in --Arg.: Relative location of the xml file in the
ToolInput folder (cpacs option) or
relative location of the temp. xml file in
the ToolOutput folder (input option).
OUTPUT
(class) ui --Out.: UserInputs class.
        (file) cpacs_in --Out.: Updated cpacs file
"""
log.info('Starting data extraction from CPACS file')
# Path creation ==========================================================
tixi = open_tixi(cpacs_in)
FUEL_PATH = '/cpacs/toolspecific/CEASIOMpy/fuels'
create_branch(tixi, FUEL_PATH, False)
if fus_nb:
for i in range(0, fus_nb):
if fus_nb > 1:
F = 'fuelOnCabin' + str(i+1)
else:
F = 'fuelOnCabin'
print((FUEL_PATH + '/' + F))
if not tixi.checkElement(FUEL_PATH + '/' + F):
tixi.createElement(FUEL_PATH, F)
tixi.updateDoubleElement(FUEL_PATH + '/' + F,\
ui.F_FUEL[i], '%g')
else:
ui.F_FUEL[i] = tixi.getDoubleElement(FUEL_PATH + '/' + F)
else:
if not tixi.checkElement(FUEL_PATH + '/fuelOnCabin'):
tixi.createElement(FUEL_PATH, 'fuelOnCabin')
tixi.updateDoubleElement(FUEL_PATH + '/fuelOnCabin',\
ui.FUEL_ON_CABIN, '%g')
else:
            temp = tixi.getDoubleElement(FUEL_PATH + '/fuelOnCabin')
if temp != ui.FUEL_ON_CABIN and temp > 0:
ui.FUEL_ON_CABIN = temp
    log.info('Data from CPACS file successfully extracted')
# Saving and closing the cpacs file --------------------------------------
tixi.saveDocument(cpacs_in)
close_tixi(tixi, cpacs_in)
    # Opening and closing the cpacs file again -------------------------------
tixi = open_tixi(cpacs_in)
tigl = open_tigl(tixi)
tixi.saveDocument(cpacs_in)
close_tixi(tixi, cpacs_in)
return(ui)
def get_data(ui, bi, mw, ed, cpacs_in):
""" The function extracts from the xml file the required input data,
the code will use the default value when they are missing.
INPUT
(class) ui --Arg.: UserInputs class.
(class) bi --Arg.: BalanceInputs class.
(class) mw --Arg.: MassesWeight class.
(class) ed --Arg.: EngineData class.
##======= Classes are defined in the InputClasses folder =======##
(char) cpacs_in --Arg.: Relative location of the xml file in the
ToolInput folder (cpacs option) or
relative location of the temp. xml file in
the ToolOutput folder (input option).
OUTPUT
(class) mw --Out.: MassesWeight class updated.
(class) ed --Out.: EngineData class updated.
        (file) cpacs_in --Out.: Updated cpacs file.
"""
log.info('CPACS file path check')
# path definition ========================================================
# Opening CPACS file
tixi = open_tixi(cpacs_in)
TSPEC_PATH = '/cpacs/toolspecific/CEASIOMpy'
GEOM_PATH = TSPEC_PATH + '/geometry'
FMP_PATH = TSPEC_PATH + '/weight/passengers/fuelMassMaxpass/mass'
PROP_PATH = TSPEC_PATH + '/propulsion'
MASS_PATH = '/cpacs/vehicles/aircraft/model/analyses/massBreakdown'
MTOM_PATH = MASS_PATH + '/designMasses/mTOM/mass'
F_PATH = MASS_PATH + '/fuel/massDescription/mass'
OEM_PATH = MASS_PATH + '/mOEM/massDescription/mass'
PAY_PATH = MASS_PATH + '/payload/massDescription/mass'
EN_PATH = '/cpacs/vehicles/engines/engine1/analysis/mass/mass'
BC_PATH = TSPEC_PATH + '/balance/userBalance'
create_branch(tixi, BC_PATH, False)
# Compulsory path checks =================================================
if not tixi.checkElement(TSPEC_PATH):
raise Exception('Missing required toolspecific path. Run '\
+ 'Weight_unc_main.py,'\
+ ' in the 4Weight_unc_module folder.')
elif not tixi.checkElement(MASS_PATH):
raise Exception('Missing required massBreakdown path. Run '\
+ 'Weight_unc_main.py,'\
+ ' in the 4Weight_unc_module folder.')
elif not tixi.checkElement(MTOM_PATH):
raise Exception('Missing required mTOM/mass path. Run '\
+ 'Weight_unc_main.py,'\
+ ' in the 4Weight_unc_module folder.')
elif not tixi.checkElement(FMP_PATH):
raise Exception('Missing required fuelMassMaxpass/mass path. Run '\
+ 'Weight_unc_main.py,'\
+ ' in the 4Weight_unc_module folder.')
elif not tixi.checkElement(OEM_PATH):
raise Exception('Missing required mOEM/massDescription/mass '\
+ 'path. Run Weight_unc_main.py,'\
+ ' in the 4Weight_unc_module folder.')
elif not tixi.checkElement(PAY_PATH):
raise Exception('Missing required payload/massDescription/mass '\
+ 'path. Run Weight_unc_main.py,'\
+ ' in the 4Weight_unc_module folder.')
elif not tixi.checkElement(F_PATH):
raise Exception('Missing required /fuel/massDescription/mass '\
+ 'path. Run Weight_unc_main.py,'\
+ ' in the 4Weight_unc_module folder.')
elif not tixi.checkElement(EN_PATH):
raise Exception('Missing required /cpacs/vehicles/engines/engine1'\
+ '/analysis/mass path. Run Weight_unc_main.py,'\
+ ' in the 4Weight_unc_module folder.')
else:
        log.info('All paths correctly defined in the toolinput.xml file, '\
                 + 'beginning data extraction.')
# Gathering data =========================================================
## Geometry Data
if not tixi.checkElement(GEOM_PATH + '/floorsNb'):
tixi.createElement(GEOM_PATH, 'floorsNb')
tixi.updateDoubleElement(GEOM_PATH + '/floorsNb',\
ui.FLOORS_NB, '%g')
else:
temp = tixi.getDoubleElement(GEOM_PATH + '/floorsNb')
if temp != ui.FLOORS_NB and temp > 0:
ui.FLOORS_NB = temp
if not tixi.checkElement(GEOM_PATH + '/cabinHeight'):
tixi.createElement(GEOM_PATH, 'cabinHeight')
tixi.updateDoubleElement(GEOM_PATH + '/cabinHeight',\
ui.H_LIM_CABIN, '%g')
else:
temp = tixi.getDoubleElement(GEOM_PATH + '/cabinHeight')
if temp != ui.H_LIM_CABIN and temp > 0:
ui.H_LIM_CABIN = temp
## User Case Balance
if not tixi.checkElement(BC_PATH + '/userCase'):
tixi.createElement(BC_PATH, 'userCase')
if bi.USER_CASE:
tixi.updateTextElement(BC_PATH + '/userCase', 'True')
else:
tixi.updateTextElement(BC_PATH + '/userCase', 'False')
else:
temp = tixi.getTextElement(BC_PATH + '/userCase')
if temp == 'False':
bi.USER_CASE = False
else:
bi.USER_CASE = True
if bi.USER_CASE:
if tixi.checkElement(BC_PATH + '/fuelPercentage'):
bi.F_PERC=tixi.getDoubleElement(BC_PATH + '/fuelPercentage')
elif bi.F_PERC:
tixi.createElement(BC_PATH, 'fuelPercentage')
tixi.updateDoubleElement(BC_PATH + '/fuelPercentage',\
bi.F_PERC, '%g')
else:
            raise Exception('User balance option defined'\
                            + ' True but no fuel percentage data in the'\
                            + ' CPACS file or in the BalanceInput class.')
if tixi.checkElement(BC_PATH + '/payloadPercentage'):
bi.P_PERC=tixi.getDoubleElement(BC_PATH + '/payloadPercentage')
elif bi.P_PERC:
tixi.createElement(BC_PATH, 'payloadPercentage')
tixi.updateDoubleElement(BC_PATH + '/payloadPercentage',\
bi.P_PERC, '%g')
else:
            raise Exception('User balance option defined'\
                            + ' True but no payload percentage data in'\
                            + ' the CPACS file or in the BalanceInput class.')
## Engines Data
ed.en_mass = tixi.getDoubleElement(EN_PATH)
if not tixi.checkElement(PROP_PATH + '/wingMountedEngine'):
create_branch(tixi, PROP_PATH, False)
tixi.createElement(PROP_PATH, 'wingMountedEngine')
if ed.WING_MOUNTED:
tixi.updateTextElement(PROP_PATH + '/wingMountedEngine', 'True')
else:
tixi.updateTextElement(PROP_PATH + '/wingMountedEngine', 'False')
else:
temp = tixi.getTextElement(PROP_PATH + '/wingMountedEngine')
if temp == 'False':
ed.WING_MOUNTED = False
else:
ed.WING_MOUNTED = True
if not tixi.checkElement(PROP_PATH + '/userEnginePlacement'):
tixi.createElement(PROP_PATH, 'userEnginePlacement')
if bi.USER_EN_PLACEMENT:
tixi.updateTextElement(PROP_PATH + '/userEnginePlacement', 'True')
else:
tixi.updateTextElement(PROP_PATH + '/userEnginePlacement', 'False')
else:
temp = tixi.getTextElement(PROP_PATH + '/userEnginePlacement')
if temp == 'False':
bi.USER_EN_PLACEMENT = False
else:
bi.USER_EN_PLACEMENT = True
if not tixi.checkElement(PROP_PATH + '/engineNumber'):
create_branch(tixi, PROP_PATH, False)
tixi.createElement(PROP_PATH, 'engineNumber')
tixi.updateIntegerElement(PROP_PATH + '/engineNumber', ed.NE, '%i')
else:
ed.NE = tixi.getIntegerElement(PROP_PATH + '/engineNumber')
## User Engine Placement
tp=[]
ed.EN_NAME=[]
if tixi.checkElement(EN_PATH):
for e in range(0,ed.NE):
EN_PATH = '/cpacs/vehicles/engines'
if ed.NE > 1:
EN_PATH += '/engine' + str(e+1)
else:
EN_PATH += '/engine'
if not tixi.checkElement(EN_PATH):
                raise Exception('Engine definition incomplete, missing'\
                                + ' one or more engines in the cpacs file')
if not tixi.checkElement(EN_PATH + '/name'):
ed.EN_NAME.append('Engine_' + str(e+1))
else:
ed.EN_NAME.append(tixi.getTextElement(EN_PATH + '/name'))
ENA_PATH = EN_PATH + '/analysis/mass'
if tixi.checkElement(ENA_PATH):
ed.en_mass = tixi.getDoubleElement(ENA_PATH + '/mass')
tp.append(ed.en_mass)
if e > 0 and ed.en_mass != tp[e-1]:
                    log.warning('The engines have different masses, '\
                                + 'this can lead to an unbalanced aircraft')
elif ed.en_mass:
tixi.createElement(ENA_PATH, 'mass')
tixi.updateDoubleElement(ENA_PATH + '/mass', ed.en_mass, '%g')
else:
                raise Exception('Engine definition incomplete, missing'\
                                + ' engine mass in the cpacs file')
s = np.shape(ed.EN_PLACEMENT)
warn = False
if not ed.NE:
raise Exception('No engine defined for the aircraft')
elif s[0] < ed.NE or s[1] < 3 or np.any(ed.EN_PLACEMENT) == False:
warn=True
else:
log.info('EngineData class defined correctly.')
s = ed.EN_PLACEMENT
if bi.USER_EN_PLACEMENT:
ed.EN_PLACEMENT = []
for e in range(1,ed.NE+1):
if ed.NE > 1:
ENLOC_PATH = '/cpacs/vehicles/engines/engine' + str(e)\
+ '/analysis/mass/location'
else:
ENLOC_PATH = '/cpacs/vehicles/engines/engine'\
+ '/analysis/mass/location'
if not tixi.checkElement(ENLOC_PATH) and warn:
raise Exception('User engine Placement option defined'\
+ ' True but no engine placement data in the'\
+ ' CPACS file.')
if not tixi.checkElement(ENLOC_PATH) and not warn:
create_branch(tixi, ENLOC_PATH, False)
tixi.createElement(ENLOC_PATH, 'x')
tixi.createElement(ENLOC_PATH, 'y')
tixi.createElement(ENLOC_PATH, 'z')
tixi.updateDoubleElement(ENLOC_PATH +'/x', s[e-1][0], '%g')
tixi.updateDoubleElement(ENLOC_PATH +'/y', s[e-1][1], '%g')
tixi.updateDoubleElement(ENLOC_PATH +'/z', s[e-1][2], '%g')
ed.EN_PLACEMENT.append([s[e-1][0], s[e-1][1], s[e-1][2]])
else:
x=tixi.getDoubleElement(ENLOC_PATH + '/x')
y=tixi.getDoubleElement(ENLOC_PATH + '/y')
z=tixi.getDoubleElement(ENLOC_PATH + '/z')
ed.EN_PLACEMENT.append([x,y,z])
ed.EN_PLACEMENT=np.array(ed.EN_PLACEMENT)
## REQUIRED TOOLSPECIFIC DATA ============================================
# Fuel
mw.mass_fuel_maxpass = tixi.getDoubleElement(FMP_PATH)
## REQUIRED MASSBREAKDOWN DATA ===========================================
mw.maximum_take_off_mass = tixi.getDoubleElement(MTOM_PATH)
mw.operating_empty_mass = tixi.getDoubleElement(OEM_PATH)
mw.mass_payload = tixi.getDoubleElement(PAY_PATH)
mw.mass_fuel_tot = tixi.getDoubleElement(F_PATH)
    log.info('Data from CPACS file successfully extracted')
# Saving and closing the cpacs file ======================================
tixi.saveDocument(cpacs_in)
close_tixi(tixi, cpacs_in)
    # Opening and closing the cpacs file again ===============================
tixi = open_tixi(cpacs_in)
tigl = open_tigl(tixi)
tixi.saveDocument(cpacs_in)
close_tixi(tixi, cpacs_in)
return(mw, ed)
#=============================================================================
# MAIN
#=============================================================================
if __name__ == '__main__':
log.warning('#########################################################')
log.warning('# ERROR NOT A STANDALONE PROGRAM, RUN balanceuncmain.py #')
log.warning('#########################################################')
| 41.987374
| 80
| 0.541168
|
4a05dc3d5c7aac5432c75f6e8642483918cb9c71
| 128
|
py
|
Python
|
modular/Demo02/Collections02.py
|
walkingtyphoon/Python-workspace
|
e872bce82b2bac3dd5d809f8576345ccc1c6afb7
|
[
"Apache-2.0"
] | null | null | null |
modular/Demo02/Collections02.py
|
walkingtyphoon/Python-workspace
|
e872bce82b2bac3dd5d809f8576345ccc1c6afb7
|
[
"Apache-2.0"
] | null | null | null |
modular/Demo02/Collections02.py
|
walkingtyphoon/Python-workspace
|
e872bce82b2bac3dd5d809f8576345ccc1c6afb7
|
[
"Apache-2.0"
] | null | null | null |
from collections import deque
q = deque([1, 2, 3])
print(q)
q.append(6)
print("追加后的元素:", q)
q.appendleft(0)
print("追加后的元素:", q)
| 16
| 29
| 0.664063
|
4a05dc8c69da0cb534547666277ceb977a4d720a
| 86
|
py
|
Python
|
components/stairs.py
|
thomerickson/roguelike
|
011a85d53685c922b2b3ddcd28d59818359d15dc
|
[
"MIT"
] | null | null | null |
components/stairs.py
|
thomerickson/roguelike
|
011a85d53685c922b2b3ddcd28d59818359d15dc
|
[
"MIT"
] | null | null | null |
components/stairs.py
|
thomerickson/roguelike
|
011a85d53685c922b2b3ddcd28d59818359d15dc
|
[
"MIT"
] | null | null | null |
# stairs.py
class Stairs():
def __init__(self, floor):
self.floor = floor
| 17.2
| 30
| 0.616279
|
4a05dec61c3b0273598334f54da784d2d25e5ccb
| 348
|
py
|
Python
|
scrapinsta/domain/entities/IScrapinsta.py
|
matheuskolln/scrapinsta
|
47ec816f33a36e8570d4c56f921ba18a2d12a306
|
[
"MIT"
] | 1
|
2021-09-05T05:37:22.000Z
|
2021-09-05T05:37:22.000Z
|
scrapinsta/domain/entities/IScrapinsta.py
|
matheuskolln/scrapinsta
|
47ec816f33a36e8570d4c56f921ba18a2d12a306
|
[
"MIT"
] | 13
|
2020-11-06T17:43:46.000Z
|
2021-09-05T00:13:02.000Z
|
scrapinsta/domain/entities/IScrapinsta.py
|
matheuskolln/scrapinsta
|
47ec816f33a36e8570d4c56f921ba18a2d12a306
|
[
"MIT"
] | 2
|
2020-11-09T20:39:57.000Z
|
2021-09-05T01:37:29.000Z
|
from abc import ABC, abstractmethod
from typing import List
class IScrapinsta(ABC):
@abstractmethod
def login(self) -> None:
pass
@abstractmethod
def get_followers(self, user: str, amount: int) -> List[str]:
pass
@abstractmethod
def get_following(self, user: str, amount: int) -> List[str]:
pass
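# --- Illustration (not part of the original file) ---------------------------
# A minimal sketch of a concrete class satisfying the IScrapinsta interface
# defined above; the class name and the returned data are made up purely for
# illustration of how the three abstract methods would be implemented.
class DummyScrapinsta(IScrapinsta):
    def login(self) -> None:
        print("logged in")
    def get_followers(self, user: str, amount: int) -> List[str]:
        return [f"follower_{i}" for i in range(amount)]
    def get_following(self, user: str, amount: int) -> List[str]:
        return [f"following_{i}" for i in range(amount)]
# DummyScrapinsta().get_followers("some_user", 2) -> ['follower_0', 'follower_1']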
| 20.470588
| 65
| 0.646552
|
4a05df9337a4e69d1e8e148cf3341603a392a46b
| 1,501
|
py
|
Python
|
vscode/utils.py
|
TTitcombe/vscode-ext
|
925db8ba809621516722fd1557cc3fd701247497
|
[
"MIT"
] | 140
|
2021-07-25T13:54:09.000Z
|
2022-02-23T23:52:53.000Z
|
vscode/utils.py
|
Nicholas-Schaub/vscode-ext
|
3a8b54146e368c67d3e6db7c3929d05e52cbd947
|
[
"MIT"
] | 24
|
2021-07-25T14:22:57.000Z
|
2022-01-25T01:37:42.000Z
|
vscode/utils.py
|
Nicholas-Schaub/vscode-ext
|
3a8b54146e368c67d3e6db7c3929d05e52cbd947
|
[
"MIT"
] | 19
|
2021-07-25T08:13:36.000Z
|
2022-02-12T20:52:04.000Z
|
from typing import Optional
__all__ = (
"log",
"camel_case_to_snake_case",
"snake_case_to_camel_case",
"snake_case_to_title_case",
"python_condition_to_js_condition",
)
def log(*args, **kwargs):
kwargs["flush"] = True
print(*args, **kwargs)
def camel_case_to_snake_case(text: str) -> str:
return "".join("_" + i.lower() if i.isupper() else i for i in text).lstrip("_")
def snake_case_to_camel_case(text: Optional[str]) -> Optional[str]:
if text is None:
return None
temp = text.split("_")
return temp[0] + "".join(ele.title() for ele in temp[1:])
def snake_case_to_title_case(text: Optional[str]) -> Optional[str]:
if text is None:
return None
return text.replace("_", " ").title()
def python_condition_to_js_condition(condition: Optional[str]) -> Optional[str]:
if condition is None:
return None
condition = " ".join(
i if "_" not in i else snake_case_to_camel_case(i) for i in condition.split(" ")
)
condition = condition.replace(" and ", " && ")
condition = condition.replace(" or ", " || ")
if " not " in condition:
if "(" not in condition or ")" not in condition:
raise SyntaxError(
"Use parenthesis '()' while using 'not' otherwise your conditions might not work as expected!"
)
else:
condition = condition.replace(" not ", " !")
return condition
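# --- Illustration (not part of the original file) ---------------------------
# Hypothetical usage of the helpers defined above, showing what each returns.
if __name__ == "__main__":
    print(camel_case_to_snake_case("myVariableName"))    # my_variable_name
    print(snake_case_to_camel_case("my_variable_name"))  # myVariableName
    print(snake_case_to_title_case("my_variable_name"))  # My Variable Name
    print(python_condition_to_js_condition("is_active and not (has_errors)"))
    # isActive && !(hasErrors)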
| 27.796296
| 111
| 0.596935
|
4a05dff28e07fd05c42da14211d72b941f50d4f6
| 650
|
py
|
Python
|
app/display_modules/pathways/models.py
|
MetaGenScope/metagenscope-server
|
609cd57c626c857c8efde8237a1f22f4d1e6065d
|
[
"MIT"
] | null | null | null |
app/display_modules/pathways/models.py
|
MetaGenScope/metagenscope-server
|
609cd57c626c857c8efde8237a1f22f4d1e6065d
|
[
"MIT"
] | null | null | null |
app/display_modules/pathways/models.py
|
MetaGenScope/metagenscope-server
|
609cd57c626c857c8efde8237a1f22f4d1e6065d
|
[
"MIT"
] | null | null | null |
"""Models for pathways."""
from app.extensions import mongoDB as mdb
# Define aliases
EmbeddedDoc = mdb.EmbeddedDocumentField # pylint: disable=invalid-name
class PathwaySampleDocument(mdb.EmbeddedDocument): # pylint: disable=too-few-public-methods
"""Pathway for a single sample."""
pathway_abundances = mdb.MapField(mdb.FloatField(), required=True)
pathway_coverages = mdb.MapField(mdb.FloatField(), required=True)
class PathwayResult(mdb.EmbeddedDocument): # pylint: disable=too-few-public-methods
"""Set of pathway results."""
samples = mdb.MapField(field=EmbeddedDoc(PathwaySampleDocument), required=True)
| 30.952381
| 92
| 0.747692
|
4a05e069c8cf0c2aff65a77562170fc9441c2403
| 449
|
py
|
Python
|
processing/file_list.py
|
ssbgp/data-tools
|
ec8caf5831eae6a35fd95bb2fb86cf77434bf4d9
|
[
"MIT"
] | null | null | null |
processing/file_list.py
|
ssbgp/data-tools
|
ec8caf5831eae6a35fd95bb2fb86cf77434bf4d9
|
[
"MIT"
] | null | null | null |
processing/file_list.py
|
ssbgp/data-tools
|
ec8caf5831eae6a35fd95bb2fb86cf77434bf4d9
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import Iterator
from collections.abc import Iterable
from processing.file_collection import FileCollection
class FileList(FileCollection):
""" A simple list of files """
def __init__(self, iterable: Iterable) -> None:
self._list = list(iterable)
def __iter__(self) -> Iterator[Path]:
""" Returns an iterator to iterate over each file in the list """
return iter(self._list)
| 24.944444
| 73
| 0.706013
|
4a05e0b1f45dd90a9b2a99a5d94c4d051384ec1c
| 392
|
py
|
Python
|
app/modules.py
|
yusufsyaifudin/stex
|
ede1450fa1da296c52353a75f709302bc7bf6f38
|
[
"MIT"
] | null | null | null |
app/modules.py
|
yusufsyaifudin/stex
|
ede1450fa1da296c52353a75f709302bc7bf6f38
|
[
"MIT"
] | null | null | null |
app/modules.py
|
yusufsyaifudin/stex
|
ede1450fa1da296c52353a75f709302bc7bf6f38
|
[
"MIT"
] | null | null | null |
from flask import render_template
from app import app
# Import a module / component using its blueprint handler variable
from app.home_module.controllers import mod_home
# Register blueprint(s)
# app.register_blueprint(xyz_module)
# ..
app.register_blueprint(mod_home)
# Sample HTTP error handling
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
| 21.777778
| 66
| 0.790816
|
4a05e1ecee90fe4c8e6643f031c5cfa7dcb965f6
| 932
|
py
|
Python
|
models/dqn_state.py
|
abefetterman/slither
|
fb5a45f40dbc806879caf4e0a758c074ad4d7aca
|
[
"MIT"
] | null | null | null |
models/dqn_state.py
|
abefetterman/slither
|
fb5a45f40dbc806879caf4e0a758c074ad4d7aca
|
[
"MIT"
] | null | null | null |
models/dqn_state.py
|
abefetterman/slither
|
fb5a45f40dbc806879caf4e0a758c074ad4d7aca
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
import math
from methods.utils import conv_chain
class DQN(nn.Module):
def __init__(self, h, w, batch_norm=False):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1)
conv_out_h, conv_out_w = conv_chain((h,w), [self.conv1, self.conv2])
self.hidden = nn.Linear(conv_out_h*conv_out_w*32, 256)
self.head = nn.Linear(256, 4)
if batch_norm:
self.bn1 = nn.BatchNorm2d(32)
self.bn2 = nn.BatchNorm2d(32)
else:
self.bn1 = lambda x: x
self.bn2 = lambda x: x
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.hidden(x.view(x.size(0), -1)))
#print(x.size())
return self.head(x)
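# --- Illustration (not part of the original file) ---------------------------
# A minimal smoke-test sketch for the DQN above; the 84x84 input size is an
# assumption, and it relies on conv_chain() from methods.utils being importable.
if __name__ == "__main__":
    import torch
    model = DQN(h=84, w=84, batch_norm=True)
    dummy = torch.zeros(1, 3, 84, 84)  # one RGB 84x84 observation
    q_values = model(dummy)
    print(q_values.shape)  # expected: torch.Size([1, 4])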
| 31.066667
| 76
| 0.589056
|
4a05e1f188ae7cd1c99dc41e22303f5c9b66adc1
| 884
|
py
|
Python
|
tf2_models/utils.py
|
samiraabnar/DistillingInductiveBias
|
962f87e7d38a3d255846432286e048d176ed7a5d
|
[
"MIT"
] | 10
|
2020-07-04T09:11:36.000Z
|
2021-12-16T13:06:35.000Z
|
tf2_models/utils.py
|
samiraabnar/DistillingInductiveBias
|
962f87e7d38a3d255846432286e048d176ed7a5d
|
[
"MIT"
] | null | null | null |
tf2_models/utils.py
|
samiraabnar/DistillingInductiveBias
|
962f87e7d38a3d255846432286e048d176ed7a5d
|
[
"MIT"
] | 3
|
2021-07-09T16:24:07.000Z
|
2022-02-07T15:49:05.000Z
|
import tensorflow as tf
import re
from tensorboard.compat.tensorflow_stub import tensor_shape
def camel2snake(name):
return name[0].lower() + re.sub(r'(?!^)[A-Z]', lambda x: '_' + x.group(0).lower(), name[1:])
def log_summary(log_value, log_name, summary_scope):
"""Produce scalar summaries."""
with tf.compat.v2.summary.experimental.summary_scope(summary_scope):
tf.summary.scalar(log_name, log_value)
def create_init_var(unnested_state_size, i, initializer_range):
flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
init_state_size = [1] + flat_dims
return tf.Variable(shape=init_state_size, dtype=tf.float32,
initial_value=tf.keras.initializers.TruncatedNormal(stddev=initializer_range)(
shape=init_state_size),
trainable=True,
name="lstm_init_" + str(i))
| 32.740741
| 99
| 0.692308
|
4a05e2fa26cfc12ddc82be7f8599aa5d4e5546e6
| 1,329
|
py
|
Python
|
scripts/exponentiation_timing/expm_comparisons.py
|
Evan1415/QMLA
|
4521f7c08456a4494aed7c1b78d8ded5ea40f3d8
|
[
"MIT"
] | null | null | null |
scripts/exponentiation_timing/expm_comparisons.py
|
Evan1415/QMLA
|
4521f7c08456a4494aed7c1b78d8ded5ea40f3d8
|
[
"MIT"
] | null | null | null |
scripts/exponentiation_timing/expm_comparisons.py
|
Evan1415/QMLA
|
4521f7c08456a4494aed7c1b78d8ded5ea40f3d8
|
[
"MIT"
] | null | null | null |
import os as os
import warnings
import numpy as np
import itertools as itr
import matplotlib.pyplot as plt
import sys as sys
import pandas as pd
import warnings
import time as time
import logging
import random
import pickle
pickle.HIGHEST_PROTOCOL = 4
p = os.path.abspath(os.path.realpath(__file__))
elements = p.split('/')[:-2]
qmla_root = os.path.abspath('/'.join(elements))
sys.path.append(qmla_root)
import qmla
from qmla import construct_models
from expm import expm
from scipy.linalg import expm as lexpm
num_iterations = 100
t_expm = 0
t_lexpm = 0
for i in range(num_iterations):
model_params = {
'FH-hopping-sum_up_1h2_1h3_2h4_3h4_d4': np.random.uniform(0,1),
'FH-onsite-sum_1_2_3_4_d4': np.random.uniform(0,1),
'FH-hopping-sum_down_1h2_1h3_2h4_3h4_d4': np.random.uniform(0,1)
}
model = sum([
model_params[term] * qmla.construct_models.compute(term) for term in model_params
])
t = np.random.uniform(0,100)
start_expm = time.time()
u = expm(-1j*model*t)
t_expm += time.time() - start_expm
start_lexpm = time.time()
u = lexpm(-1j*model*t)
    t_lexpm += time.time() - start_lexpm
print("Total times taken:\n \texpm={} \n\tlexpm={} \n\tSpeedup={}".format(
np.round(t_expm, 2), np.round(t_lexpm, 2), np.round(t_lexpm/t_expm, 2)
))
| 22.525424
| 89
| 0.69526
|
4a05e4d01b698a17f0f01a28150823fd8772b303
| 2,145
|
py
|
Python
|
src/lib/celery/__init__.py
|
Jiawei2333/holmuskTest
|
46d85619915f977c2ca8e8a431db3c916ac157c7
|
[
"MIT"
] | 1
|
2019-08-31T02:19:33.000Z
|
2019-08-31T02:19:33.000Z
|
src/lib/celery/__init__.py
|
Jiawei2333/holmuskTest
|
46d85619915f977c2ca8e8a431db3c916ac157c7
|
[
"MIT"
] | 7
|
2020-01-28T22:54:41.000Z
|
2022-02-10T00:15:57.000Z
|
src/lib/celery/__init__.py
|
kennethleung-holmusk/holmuskTest
|
7580f029a06d16a6a319965dd0d1ea466d0c0c64
|
[
"MIT"
] | null | null | null |
'''library for the celery worker
This contains a library that will generate a celery app. This is
a library that is provided so that everything can be made as
simple as possible. There is no need to change anything in this
library, and this library should work as is. The current celery
library works using an updated logger, and this will create its
own logger.
All requirements for this library can be specified within the
configuration file ``config/celery.json``. Currently this relies
upon you generating the broker and results backend, all of which
can be easily changed within the configuration file.
.. code-block:: python
:emphasize-lines: 2,9
{
"base":{
"name" : "holmuskTest",
"BROKER_URL" : "redis://localhost:6379/0",
"BACKEND_URL" : "redis://localhost:6379/1",
"include" : ["lib.celeryWorkerExample.worker_1"]
},
"extra" : {
"result_expires" : 3600
}
}
It is absolutely essential that you specify the ``"base"`` configuration. This
is where information about the name (which defaults to the name of the project),
the ``BROKER_URL`` and the ``BACKEND_URL`` must be specified. The default is a
local Redis instance, and this will certainly have to be modified to suit your
needs.
All workers must be specified in the ``base.include`` specification. You may
specify as many as you want.
All other information **must** be specified within the ``extra`` configuration.
Once this is specified, it is possible to run a set of celery workers using the
command ``make runCelery`` in the ``src`` folder. This will allow you run 4
parallel workers. If you want to start many more (depending upon your processor
capabilities) you should start the celery worker yourself using the command:
.. code-block:: bash
celery -A lib.celery.App worker --concurrency=10 --loglevel=INFO
Note that celery provides multiple ways of starting workers as shown
[here](http://docs.celeryproject.org/en/latest/userguide/workers.html)
including autoscaling, etc. and you are welcome to experiment with all its
features.
'''
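# --- Illustration (not part of the original file) ---------------------------
# A minimal sketch, assuming the JSON layout shown in the docstring above, of
# how such a configuration could be turned into a Celery app. The helper name
# make_app() is an assumption; only the final `App` object is referenced by
# the docstring (``lib.celery.App``).
import json
from celery import Celery
def make_app(config_path="config/celery.json"):
    with open(config_path) as fp:
        cfg = json.load(fp)
    base = cfg["base"]
    app = Celery(
        base.get("name", "holmuskTest"),
        broker=base["BROKER_URL"],
        backend=base["BACKEND_URL"],
        include=base.get("include", []),
    )
    # Everything under "extra" goes straight into the Celery configuration.
    app.conf.update(cfg.get("extra", {}))
    return app
App = make_app()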
| 38.303571
| 80
| 0.72028
|
4a05e53a1cb60e71bfce4960cd0a5062bcd88462
| 959
|
py
|
Python
|
boomerang/client.py
|
olalidmark/boomerang-client
|
c4a1d9d96190104b56e10faa78b37ee287929a17
|
[
"MIT"
] | null | null | null |
boomerang/client.py
|
olalidmark/boomerang-client
|
c4a1d9d96190104b56e10faa78b37ee287929a17
|
[
"MIT"
] | null | null | null |
boomerang/client.py
|
olalidmark/boomerang-client
|
c4a1d9d96190104b56e10faa78b37ee287929a17
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import urlparse
import requests
__author__ = 'ola'
class BoomerangClient:
BASE_URL = 'http://api.boomerang.io/v1'
def __init__(self, project_id, api_key):
self.url = "%s/api_key/%s/projects/%s/boomerangs/" % (self.BASE_URL, api_key, project_id)
def boomerang_url(self, bid):
return urlparse.urljoin(self.url, bid)
def get_all_boomerangs(self):
res = requests.get(self.url)
return res
def get_one_boomerang(self, boomerang_id):
res = requests.get(self.boomerang_url(boomerang_id))
return res
def create_boomerang(self, params):
res = requests.post(self.url, params)
return res
def update_boomerang(self, boomerang_id):
res = requests.put(self.boomerang_url(boomerang_id))
return res
def delete_boomerang(self, boomerang_id):
res = requests.delete(self.boomerang_url(boomerang_id))
return res
| 22.833333
| 97
| 0.662148
|
4a05e53bf4ea41ea3099120b9e36a93fab5e0658
| 447
|
py
|
Python
|
product/migrations/0005_review_date.py
|
MW982/Django-shop
|
da28348f93fbe4c545495b7ce43bca7db8f2308b
|
[
"MIT"
] | null | null | null |
product/migrations/0005_review_date.py
|
MW982/Django-shop
|
da28348f93fbe4c545495b7ce43bca7db8f2308b
|
[
"MIT"
] | 7
|
2020-06-06T01:07:24.000Z
|
2022-02-10T11:36:55.000Z
|
product/migrations/0005_review_date.py
|
MW982/Django-shop
|
da28348f93fbe4c545495b7ce43bca7db8f2308b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.4 on 2019-09-10 11:37
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("product", "0004_auto_20190910_1318"),
]
operations = [
migrations.AddField(
model_name="review",
name="date",
field=models.DateTimeField(blank=True, default=django.utils.timezone.now),
),
]
| 22.35
| 86
| 0.630872
|
4a05e623ab936bb688a75022e7601b42d1d0bf60
| 3,641
|
py
|
Python
|
huaweicloud-sdk-ivs/huaweicloudsdkivs/v2/model/ivs_extention_by_name_and_id_request_body.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-ivs/huaweicloudsdkivs/v2/model/ivs_extention_by_name_and_id_request_body.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-ivs/huaweicloudsdkivs/v2/model/ivs_extention_by_name_and_id_request_body.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class IvsExtentionByNameAndIdRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'meta': 'Meta',
'data': 'IvsExtentionByNameAndIdRequestBodyData'
}
attribute_map = {
'meta': 'meta',
'data': 'data'
}
def __init__(self, meta=None, data=None):
"""IvsExtentionByNameAndIdRequestBody - a model defined in huaweicloud sdk"""
self._meta = None
self._data = None
self.discriminator = None
self.meta = meta
self.data = data
@property
def meta(self):
"""Gets the meta of this IvsExtentionByNameAndIdRequestBody.
:return: The meta of this IvsExtentionByNameAndIdRequestBody.
:rtype: Meta
"""
return self._meta
@meta.setter
def meta(self, meta):
"""Sets the meta of this IvsExtentionByNameAndIdRequestBody.
:param meta: The meta of this IvsExtentionByNameAndIdRequestBody.
:type: Meta
"""
self._meta = meta
@property
def data(self):
"""Gets the data of this IvsExtentionByNameAndIdRequestBody.
:return: The data of this IvsExtentionByNameAndIdRequestBody.
:rtype: IvsExtentionByNameAndIdRequestBodyData
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this IvsExtentionByNameAndIdRequestBody.
:param data: The data of this IvsExtentionByNameAndIdRequestBody.
:type: IvsExtentionByNameAndIdRequestBodyData
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IvsExtentionByNameAndIdRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.772059
| 85
| 0.569898
|
4a05e6a47091e892cbaed95b5add6eb85af36139
| 656
|
py
|
Python
|
api/drinks/views/uom.py
|
gthole/drink-stash
|
873393fddeaa8f58a8b70d082fbff60175901f97
|
[
"MIT"
] | 7
|
2019-03-11T22:43:38.000Z
|
2022-02-21T13:18:39.000Z
|
api/drinks/views/uom.py
|
gthole/drink-stash
|
873393fddeaa8f58a8b70d082fbff60175901f97
|
[
"MIT"
] | 62
|
2018-10-13T17:39:07.000Z
|
2022-02-26T06:21:41.000Z
|
api/drinks/views/uom.py
|
gthole/drink-stash
|
873393fddeaa8f58a8b70d082fbff60175901f97
|
[
"MIT"
] | 1
|
2020-10-31T16:10:59.000Z
|
2020-10-31T16:10:59.000Z
|
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from rest_framework.permissions import IsAuthenticated
from drinks.models import Uom
from drinks.serializers import UomSerializer
from drinks.views.base import LazyViewSet
class UomViewSet(LazyViewSet):
audit_field = 'created'
http_method_names = ['get', 'head']
queryset = Uom.objects.all().order_by('name')
serializer_class = UomSerializer
# Cache requested url for 2 hours
@method_decorator(cache_page(60 * 60 * 2))
def list(self, request, format=None):
return super(UomViewSet, self).list(request, format)
| 32.8
| 60
| 0.76372
|
4a05e7dc70bcd875370848e82717b563d83bc601
| 4,126
|
py
|
Python
|
sbcdb/spectra_utils.py
|
neilswainston/Grimoire
|
42775ff9a03fdbd3b47269b46c883fdf5b37a2be
|
[
"MIT"
] | 9
|
2019-04-24T12:47:10.000Z
|
2021-05-12T12:46:33.000Z
|
sbcdb/spectra_utils.py
|
neilswainston/Grimoire
|
42775ff9a03fdbd3b47269b46c883fdf5b37a2be
|
[
"MIT"
] | 1
|
2017-01-16T08:45:19.000Z
|
2017-01-16T08:45:19.000Z
|
sbcdb/spectra_utils.py
|
synbiochem/biochem4j
|
42775ff9a03fdbd3b47269b46c883fdf5b37a2be
|
[
"MIT"
] | 5
|
2019-10-13T14:02:28.000Z
|
2020-12-23T18:44:29.000Z
|
'''
SYNBIOCHEM-DB (c) University of Manchester 2015
SYNBIOCHEM-DB is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
import os
import tempfile
from urllib import urlretrieve
import zipfile
import ijson
__MONA_URL = 'http://mona.fiehnlab.ucdavis.edu/rest/downloads/retrieve/' + \
'd2eb33f0-b22e-49a7-bc31-eb951f8347b2'
__MONA_FILENAME = 'MoNA-export-All_Spectra.json'
_NAME_MAP = {'kegg': 'kegg.compound',
'molecular formula': 'formula',
'total exact mass': 'monoisotopic_mass:float'}
def load(writer, chem_manager,
array_delimiter='|', url=__MONA_URL, filename=__MONA_FILENAME):
'''Build Spectrum nodes and relationships.'''
nodes = []
rels = []
records = _parse(_get_file(url, filename), array_delimiter)
for record in records:
chem_id, _ = chem_manager.add_chemical(record['chemical'])
nodes.append(record['spectrum'])
rels.append([chem_id, 'has', record['spectrum']['id:ID(Spectrum)']])
return [writer.write_nodes(nodes, 'Spectrum')], \
[writer.write_rels(rels, 'Chemical', 'Spectrum')]
def _parse(filename, array_delimiter):
'''Parses MoNA json file.'''
records = []
record = {'chemical': {'names:string[]': []},
'spectrum': {':LABEL': 'Spectrum', 'tags:string[]': []}}
name = None
for prefix, typ, value in ijson.parse(open(filename)):
if prefix == 'item' and typ == 'start_map':
record = {'chemical': {'names:string[]': []},
'spectrum': {':LABEL': 'Spectrum',
'tags:string[]': []}}
elif prefix == 'item.compound.item.inchi':
record['chemical']['inchi'] = value
elif prefix == 'item.compound.item.names.item.name':
if 'name' not in record['chemical']:
record['chemical']['name'] = value
record['chemical']['names:string[]'].append(value)
elif prefix == 'item.compound.item.metaData.item.name' or \
prefix == 'item.metaData.item.name':
name = _normalise_name(value.lower())
elif prefix == 'item.compound.item.metaData.item.value':
_parse_compound_metadata(name, value, record)
name = None
elif prefix == 'item.id':
record['spectrum']['id:ID(Spectrum)'] = value
elif prefix == 'item.metaData.item.value':
record['spectrum'][name] = value
name = None
elif prefix == 'item.spectrum':
values = [float(val) for term in value.split()
for val in term.split(':')]
record['spectrum']['m/z:float[]'] = \
array_delimiter.join(map(str, values[0::2]))
record['spectrum']['I:float[]'] = \
array_delimiter.join(map(str, values[1::2]))
elif prefix == 'item.tags.item.text':
record['spectrum']['tags:string[]'].append(value)
elif prefix == 'item' and typ == 'end_map':
records.append(record)
return records
def _get_file(url, filename):
'''Gets file from url.'''
destination = os.path.join(os.path.expanduser('~'), 'MoNA')
if not os.path.exists(destination):
os.makedirs(destination)
filepath = os.path.join(destination, filename)
if not os.path.exists(filepath):
tmp_file = tempfile.NamedTemporaryFile(delete=False)
urlretrieve(url, tmp_file.name)
zfile = zipfile.ZipFile(tmp_file.name, 'r')
filepath = os.path.join(destination, zfile.namelist()[0])
zfile.extractall(destination)
return filepath
def _parse_compound_metadata(name, value, record):
'''Parses compound metadata.'''
if name == 'chebi' and isinstance(value, unicode):
value = value.replace('CHEBI:', '').split()[0]
record['chemical'][_normalise_name(name)] = value
def _normalise_name(name):
'''Normalises name in name:value pairs.'''
if name in _NAME_MAP:
return _NAME_MAP[name]
return name.replace(':', '_')
| 33.819672
| 76
| 0.601794
|
4a05e830d8a9b6f40ea8a9a41b40e80734cca218
| 6,279
|
py
|
Python
|
djangocms_blog/forms.py
|
kapt-labs/djangocms-blog
|
d18382808766548c0ec1b9f0dabe443d5430aebf
|
[
"BSD-3-Clause"
] | 1
|
2022-01-09T20:23:10.000Z
|
2022-01-09T20:23:10.000Z
|
djangocms_blog/forms.py
|
kapt-labs/djangocms-blog
|
d18382808766548c0ec1b9f0dabe443d5430aebf
|
[
"BSD-3-Clause"
] | null | null | null |
djangocms_blog/forms.py
|
kapt-labs/djangocms-blog
|
d18382808766548c0ec1b9f0dabe443d5430aebf
|
[
"BSD-3-Clause"
] | 1
|
2021-05-26T15:17:13.000Z
|
2021-05-26T15:17:13.000Z
|
from django import forms
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.validators import MaxLengthValidator
from django.utils.functional import cached_property
from parler.forms import TranslatableModelForm
from taggit_autosuggest.widgets import TagAutoSuggest
from .models import BlogCategory, BlogConfig, Post
from .settings import PERMALINK_TYPE_CATEGORY, get_setting
User = get_user_model()
class ConfigFormBase:
"""Base form class for all models depends on app_config."""
@cached_property
def app_config(self):
"""
Return the currently selected app_config, whether it's an instance attribute or passed in the request
"""
if getattr(self.instance, "app_config_id", None):
return self.instance.app_config
elif "app_config" in self.initial:
return BlogConfig.objects.get(pk=self.initial["app_config"])
elif self.data.get("app_config", None):
return BlogConfig.objects.get(pk=self.data["app_config"])
return None
class CategoryAdminForm(ConfigFormBase, TranslatableModelForm):
def __init__(self, *args, **kwargs):
self.base_fields["meta_description"].validators = [MaxLengthValidator(get_setting("META_DESCRIPTION_LENGTH"))]
original_attrs = self.base_fields["meta_description"].widget.attrs
if "cols" in original_attrs:
del original_attrs["cols"]
if "rows" in original_attrs:
del original_attrs["rows"]
original_attrs["maxlength"] = get_setting("META_DESCRIPTION_LENGTH")
self.base_fields["meta_description"].widget = forms.TextInput(original_attrs)
super().__init__(*args, **kwargs)
if "parent" in self.fields:
qs = self.fields["parent"].queryset
if self.instance.pk:
qs = qs.exclude(pk__in=[self.instance.pk] + [child.pk for child in self.instance.descendants()])
config = None
if getattr(self.instance, "app_config_id", None):
qs = qs.namespace(self.instance.app_config.namespace)
elif "app_config" in self.initial:
config = BlogConfig.objects.get(pk=self.initial["app_config"])
elif self.data.get("app_config", None):
config = BlogConfig.objects.get(pk=self.data["app_config"])
if config:
qs = qs.namespace(config.namespace)
self.fields["parent"].queryset = qs
class Meta:
model = BlogCategory
fields = "__all__"
class BlogPluginForm(forms.ModelForm):
"""Base plugin form to inject the list of configured template folders from BLOG_PLUGIN_TEMPLATE_FOLDERS."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if "template_folder" in self.fields:
self.fields["template_folder"].choices = get_setting("PLUGIN_TEMPLATE_FOLDERS")
class LatestEntriesForm(BlogPluginForm):
"""Custom forms for BlogLatestEntriesPlugin to properly load taggit-autosuggest."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["tags"].widget = TagAutoSuggest("taggit.Tag")
class Media:
css = {"all": ("{}djangocms_blog/css/{}".format(settings.STATIC_URL, "djangocms_blog_admin.css"),)}
class AuthorPostsForm(BlogPluginForm):
"""Custom form for BlogAuthorPostsPlugin to apply distinct to the list of authors in plugin changeform."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# apply distinct due to django issue #11707
self.fields["authors"].queryset = User.objects.filter(djangocms_blog_post_author__publish=True).distinct()
class PostAdminFormBase(ConfigFormBase, TranslatableModelForm):
"""
Common methods between the admin and wizard form
"""
class Meta:
model = Post
fields = "__all__"
@cached_property
def available_categories(self):
qs = BlogCategory.objects
if self.app_config:
return qs.namespace(self.app_config.namespace).active_translations()
return qs
def _post_clean_translation(self, translation):
# This is a quickfix for https://github.com/django-parler/django-parler/issues/236
# which needs to be fixed in parler
# operating at form level ensure that if the model is validated outside the form
# the uniqueness check is not disabled
super()._post_clean_translation(translation)
self._validate_unique = False
class PostAdminForm(PostAdminFormBase):
def __init__(self, *args, **kwargs):
self.base_fields["meta_description"].validators = [MaxLengthValidator(get_setting("META_DESCRIPTION_LENGTH"))]
original_attrs = self.base_fields["meta_description"].widget.attrs
if "cols" in original_attrs:
del original_attrs["cols"]
if "rows" in original_attrs:
del original_attrs["rows"]
original_attrs["maxlength"] = get_setting("META_DESCRIPTION_LENGTH")
self.base_fields["meta_description"].widget = forms.TextInput(original_attrs)
self.base_fields["meta_title"].validators = [MaxLengthValidator(get_setting("META_TITLE_LENGTH"))]
super().__init__(*args, **kwargs)
if "categories" in self.fields:
if self.app_config and self.app_config.url_patterns == PERMALINK_TYPE_CATEGORY:
self.fields["categories"].required = True
self.fields["categories"].queryset = self.available_categories
if "app_config" in self.fields:
# Don't allow app_configs to be added here. The correct way to add an
# apphook-config is to create an apphook on a cms Page.
self.fields["app_config"].widget.can_add_related = False
if self.app_config:
if not self.initial.get("main_image_full", ""):
self.initial["main_image_full"] = self.app_config.app_data["config"].get("default_image_full")
if not self.initial.get("main_image_thumbnail", ""):
self.initial["main_image_thumbnail"] = self.app_config.app_data["config"].get(
"default_image_thumbnail"
)
| 42.714286
| 118
| 0.675426
|
4a05e8e44751a8c358d2dd267db6c60b64abbabc
| 2,903
|
py
|
Python
|
sync_traccar_erpnext.py
|
Nayar/frappe-fleet-management-system
|
0cea9f5397aa87e7f11eaf6cbe6943b2199b37e6
|
[
"MIT"
] | null | null | null |
sync_traccar_erpnext.py
|
Nayar/frappe-fleet-management-system
|
0cea9f5397aa87e7f11eaf6cbe6943b2199b37e6
|
[
"MIT"
] | null | null | null |
sync_traccar_erpnext.py
|
Nayar/frappe-fleet-management-system
|
0cea9f5397aa87e7f11eaf6cbe6943b2199b37e6
|
[
"MIT"
] | 3
|
2019-09-09T17:18:37.000Z
|
2020-09-15T14:34:25.000Z
|
import http.client
from base64 import b64encode
import json
class Settings:
traccar_server='10.65.35.27:8082'
erpnext_server='10.65.35.117:8000'
traccar_auth_header={ 'Authorization' : 'Basic %s' % (b64encode(b"admin:admin").decode("ascii")) }
erpnext_headers = False
class MeraFrappeHelper:
def curl():
pass
class Vehicle:
def getVehiclesFromTraccar():
conn = http.client.HTTPConnection(Settings.traccar_server)
conn.request("GET", "/api/devices",headers=Settings.traccar_auth_header)
r1 = conn.getresponse()
body = r1.read().decode('UTF-8')
return json.loads(body)
pass
def getGroupsFromTraccar():
conn = http.client.HTTPConnection(Settings.traccar_server)
conn.request("GET", "/api/groups",headers=Settings.traccar_auth_header)
r1 = conn.getresponse()
body = r1.read().decode('UTF-8')
return json.loads(body)
pass
def getVehiclesFromErpnext():
conn = http.client.HTTPConnection(Settings.erpnext_server)
if(Settings.erpnext_headers == False):
conn = http.client.HTTPConnection(Settings.erpnext_server)
conn.request("GET", "/api/method/login?usr=Administrator&pwd=lol", headers={})
r1 = conn.getresponse()
headers = r1.getheaders()
for name,value in headers:
if (name == 'Set-Cookie' and 'sid' in value):
Settings.erpnext_headers = {'Cookie': value}
conn.request("POST", "/api/method/login?usr=Administrator&pwd=lol", headers=Settings.erpnext_headers)
r1 = conn.getresponse()
response = r1.read().decode('UTF-8')
conn.request("GET", '/api/resource/Vehicle',headers=Settings.erpnext_headers)
r1 = conn.getresponse()
response = r1.read().decode('UTF-8')
return json.loads(response)['data']
def sync_erpnext_traccar():
erpnext_vehicles = []
traccar_vehicles = []
for vehicle in Vehicle.getVehiclesFromTraccar():
traccar_vehicles.append(vehicle['name'])
if(vehicle['name'] not in erpnext_vehicles):
#print(vehicle['name'] + ' needs sync')
pass
for vehicle in Vehicle.getVehiclesFromErpnext():
erpnext_vehicles.append(vehicle['name'])
if(vehicle['name'] not in traccar_vehicles):
print(vehicle['name'] + ' needs sync to traccar')
conn = http.client.HTTPConnection(Settings.traccar_server)
Settings.traccar_auth_header['Content-type'] = 'application/json'
conn.request("POST", "/api/devices",headers=Settings.traccar_auth_header,body='{"uniqueId": "%s", "name" : "%s", "groupId" : 2}' % (vehicle['name'],vehicle['name']))
r1 = conn.getresponse()
body = r1.read().decode('UTF-8')
print(body)
pass
class Driver:
pass
print(Vehicle.getVehiclesFromTraccar())
print()
print()
#Vehicle.getVehiclesFromErpnext()
Vehicle.sync_erpnext_traccar()
print(Vehicle.getGroupsFromTraccar())
| 34.559524
| 173
| 0.67413
|
4a05e9a36bf772824d1148b769f5b914ca430f3d
| 9,323
|
py
|
Python
|
kedro/extras/datasets/pandas/parquet_dataset.py
|
austospumanto/kedro
|
4f89c8fd32c6660affa5ff7d4fe2b096d5de9c95
|
[
"Apache-2.0"
] | null | null | null |
kedro/extras/datasets/pandas/parquet_dataset.py
|
austospumanto/kedro
|
4f89c8fd32c6660affa5ff7d4fe2b096d5de9c95
|
[
"Apache-2.0"
] | null | null | null |
kedro/extras/datasets/pandas/parquet_dataset.py
|
austospumanto/kedro
|
4f89c8fd32c6660affa5ff7d4fe2b096d5de9c95
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""``ParquetDataSet`` loads/saves data from/to a Parquet file using an underlying
filesystem (e.g.: local, S3, GCS). It uses pandas to handle the Parquet file.
"""
import logging
from copy import deepcopy
from io import BytesIO
from pathlib import Path, PurePosixPath
from typing import Any, Dict
import fsspec
import pandas as pd
import pyarrow.parquet as pq
from kedro.io.core import (
PROTOCOL_DELIMITER,
AbstractVersionedDataSet,
DataSetError,
Version,
get_filepath_str,
get_protocol_and_path,
)
logger = logging.getLogger(__name__)
class ParquetDataSet(AbstractVersionedDataSet):
"""``ParquetDataSet`` loads/saves data from/to a Parquet file using an underlying
filesystem (e.g.: local, S3, GCS). It uses pandas to handle the Parquet file.
Example:
::
>>> from kedro.extras.datasets.pandas import ParquetDataSet
>>> import pandas as pd
>>>
>>> data = pd.DataFrame({'col1': [1, 2], 'col2': [4, 5],
>>> 'col3': [5, 6]})
>>>
>>> # data_set = ParquetDataSet(filepath="gcs://bucket/test.parquet")
>>> data_set = ParquetDataSet(filepath="test.parquet")
>>> data_set.save(data)
>>> reloaded = data_set.load()
>>> assert data.equals(reloaded)
"""
DEFAULT_LOAD_ARGS = {} # type: Dict[str, Any]
DEFAULT_SAVE_ARGS = {} # type: Dict[str, Any]
# pylint: disable=too-many-arguments
def __init__(
self,
filepath: str,
load_args: Dict[str, Any] = None,
save_args: Dict[str, Any] = None,
version: Version = None,
credentials: Dict[str, Any] = None,
fs_args: Dict[str, Any] = None,
) -> None:
"""Creates a new instance of ``ParquetDataSet`` pointing to a concrete Parquet file
on a specific filesystem.
Args:
filepath: Filepath in POSIX format to a Parquet file prefixed with a protocol like
`s3://`. If prefix is not provided, `file` protocol (local filesystem) will be used.
The prefix should be any protocol supported by ``fsspec``.
It can also be a path to a directory. If the directory is
provided then it can be used for reading partitioned parquet files.
Note: `http(s)` doesn't support versioning.
load_args: Additional options for loading Parquet file(s).
Here you can find all available arguments when reading single file:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_parquet.html
Here you can find all available arguments when reading partitioned datasets:
https://arrow.apache.org/docs/python/generated/pyarrow.parquet.ParquetDataset.html#pyarrow.parquet.ParquetDataset.read
All defaults are preserved.
save_args: Additional saving options for saving Parquet file(s).
Here you can find all available arguments:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_parquet.html
All defaults are preserved. ``partition_cols`` is not supported.
version: If specified, should be an instance of ``kedro.io.core.Version``.
If its ``load`` attribute is None, the latest version will be loaded. If
its ``save`` attribute is None, save version will be autogenerated.
credentials: Credentials required to get access to the underlying filesystem.
E.g. for ``GCSFileSystem`` it should look like `{"token": None}`.
fs_args: Extra arguments to pass into underlying filesystem class constructor
(e.g. `{"project": "my-project"}` for ``GCSFileSystem``).
"""
_fs_args = deepcopy(fs_args) or {}
_credentials = deepcopy(credentials) or {}
protocol, path = get_protocol_and_path(filepath, version)
if protocol == "file":
_fs_args.setdefault("auto_mkdir", True)
self._protocol = protocol
self._storage_options = {**_credentials, **_fs_args}
self._fs = fsspec.filesystem(self._protocol, **self._storage_options)
super().__init__(
filepath=PurePosixPath(path),
version=version,
exists_function=self._fs.exists,
glob_function=self._fs.glob,
)
# Handle default load and save arguments
self._load_args = deepcopy(self.DEFAULT_LOAD_ARGS)
if load_args is not None:
self._load_args.update(load_args)
self._save_args = deepcopy(self.DEFAULT_SAVE_ARGS)
if save_args is not None:
self._save_args.update(save_args)
if "storage_options" in self._save_args or "storage_options" in self._load_args:
logger.warning(
"Dropping `storage_options` for %s, "
"please specify them under `fs_args` or `credentials`.",
self._filepath,
)
self._save_args.pop("storage_options", None)
self._load_args.pop("storage_options", None)
def _describe(self) -> Dict[str, Any]:
return dict(
filepath=self._filepath,
protocol=self._protocol,
load_args=self._load_args,
save_args=self._save_args,
version=self._version,
)
def _load(self) -> pd.DataFrame:
load_path = get_filepath_str(self._get_load_path(), self._protocol)
if self._fs.isdir(load_path):
# It doesn't work at least on S3 if root folder was created manually
# https://issues.apache.org/jira/browse/ARROW-7867
data = (
pq.ParquetDataset(load_path, filesystem=self._fs)
.read(**self._load_args)
.to_pandas()
)
else:
data = self._load_from_pandas()
return data
def _load_from_pandas(self):
load_path = str(self._get_load_path())
if self._protocol == "file":
# file:// protocol seems to misbehave on Windows
# (<urlopen error file not on local host>),
# so we don't join that back to the filepath;
# storage_options also don't work with local paths
return pd.read_parquet(load_path, **self._load_args)
load_path = f"{self._protocol}{PROTOCOL_DELIMITER}{load_path}"
return pd.read_parquet(
load_path, storage_options=self._storage_options, **self._load_args
)
def _save(self, data: pd.DataFrame) -> None:
save_path = get_filepath_str(self._get_save_path(), self._protocol)
if Path(save_path).is_dir():
raise DataSetError(
f"Saving {self.__class__.__name__} to a directory is not supported."
)
if "partition_cols" in self._save_args:
raise DataSetError(
f"{self.__class__.__name__} does not support save argument "
f"`partition_cols`. Please use `kedro.io.PartitionedDataSet` instead."
)
bytes_buffer = BytesIO()
data.to_parquet(bytes_buffer, **self._save_args)
with self._fs.open(save_path, mode="wb") as fs_file:
fs_file.write(bytes_buffer.getvalue())
self._invalidate_cache()
def _exists(self) -> bool:
try:
load_path = get_filepath_str(self._get_load_path(), self._protocol)
except DataSetError:
return False
return self._fs.exists(load_path)
def _release(self) -> None:
super()._release()
self._invalidate_cache()
def _invalidate_cache(self) -> None:
"""Invalidate underlying filesystem caches."""
filepath = get_filepath_str(self._filepath, self._protocol)
self._fs.invalidate_cache(filepath)
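The constructor arguments documented above (``load_args``/``save_args``, ``version``, ``credentials``, ``fs_args``) are simply forwarded to pandas and to the fsspec filesystem. A minimal usage sketch follows; it assumes this excerpt is kedro's pandas ``ParquetDataSet`` (the class name is cut off here), that the import path below matches the installed kedro version, and the file path and arguments are invented:
import pandas as pd
from kedro.extras.datasets.pandas import ParquetDataSet  # assumed import path

# Hypothetical local dataset; save_args are passed through to DataFrame.to_parquet,
# while credentials/fs_args would be passed to the fsspec filesystem constructor.
data_set = ParquetDataSet(
    filepath="data/02_intermediate/example.parquet",
    save_args={"compression": "snappy"},
)
data_set.save(pd.DataFrame({"a": [1, 2, 3]}))  # written through a BytesIO buffer, as in _save()
reloaded = data_set.load()                     # read back via pandas.read_parquet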
| 40.890351 | 134 | 0.644642 |
4a05e9c86052bb44d80b048c87a4d4415d10cc4b | 705 | py | Python | djact/apps/authentication/serializers.py | baoooliang/ColleegeApp | 1030cf3af34f8a98ca88327511956e7289c85fe4 | ["MIT"] | 9 | 2020-08-28T08:27:18.000Z | 2022-03-27T12:54:28.000Z | djact/apps/authentication/serializers.py | baoooliang/ColleegeApp | 1030cf3af34f8a98ca88327511956e7289c85fe4 | ["MIT"] | 5 | 2020-08-05T18:38:07.000Z | 2022-02-28T15:53:04.000Z | djact/apps/authentication/serializers.py | baoooliang/ColleegeApp | 1030cf3af34f8a98ca88327511956e7289c85fe4 | ["MIT"] | 7 | 2020-08-10T17:28:07.000Z | 2021-07-07T15:01:13.000Z |
from rest_framework import serializers
from .models import User
class UserSerializer(serializers.ModelSerializer):
email = serializers.EmailField(required=True)
username = serializers.CharField()
password = serializers.CharField(min_length=8, write_only=True)
class Meta:
model = User
fields = ('email', 'username', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
password = validated_data.pop('password', None)
instance = self.Meta.model(**validated_data)
if password is not None:
instance.set_password(password)
instance.save()
return instance
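A hypothetical usage sketch of the serializer above (the field values are invented; it assumes a standard Django REST Framework setup and that ``User`` inherits Django's password hashing helpers): validating and saving dispatches to create(), which stores only the hashed password.
serializer = UserSerializer(data={
    "email": "jane@example.com",   # made-up values
    "username": "jane",
    "password": "s3cret-password",
})
if serializer.is_valid():
    user = serializer.save()  # calls create(), which hashes the password via set_password()
    assert user.check_password("s3cret-password")  # assumes an AbstractBaseUser-style model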
| 28.2 | 67 | 0.666667 |
4a05eaa125af0c2c385fbe7c9dcd329ba86fadf3 | 267 | py | Python | codeEval/medium/overlapping_rectangles.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | ["WTFPL"] | 1 | 2020-09-30T19:53:08.000Z | 2020-09-30T19:53:08.000Z | codeEval/medium/overlapping_rectangles.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | ["WTFPL"] | null | null | null | codeEval/medium/overlapping_rectangles.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | ["WTFPL"] | 1 | 2020-10-15T09:10:57.000Z | 2020-10-15T09:10:57.000Z |
import sys
def overlap(ls):
if max(ls[0], ls[4]) > min(ls[2], ls[6]) or max(ls[3], ls[7]) > min(ls[1], ls[5]):
return False
return True
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
print overlap(map(int, test.split(",")))
test_cases.close()
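To make the coordinate convention explicit, each test case lists upper-left x, upper-left y, lower-right x, lower-right y for the two rectangles. A worked call with made-up coordinates:
# Rectangle 1: upper-left (-3, 4), lower-right (1, 1)
# Rectangle 2: upper-left (0, 3),  lower-right (2, -1)
# The x-ranges [-3, 1] and [0, 2] intersect and the y-ranges [1, 4] and [-1, 3]
# intersect, so neither disjointness test fires and overlap() returns True.
assert overlap([-3, 4, 1, 1, 0, 3, 2, -1]) is True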
| 24.272727 | 84 | 0.621723 |
4a05eabb29b4b4f0d46a3dfcaddbd1880f14746e | 20,799 | py | Python | reference/django-survey/survey/views.py | FiaDot/programmer-competency-matrix | c44a328e9b51ab9ade1e36798dfff50741d74ae5 | ["MIT"] | 2 | 2018-03-11T22:46:12.000Z | 2018-03-13T01:30:08.000Z | reference/django-survey/survey/views.py | FiaDot/programmer-competency-matrix | c44a328e9b51ab9ade1e36798dfff50741d74ae5 | ["MIT"] | null | null | null | reference/django-survey/survey/views.py | FiaDot/programmer-competency-matrix | c44a328e9b51ab9ade1e36798dfff50741d74ae5 | ["MIT"] | null | null | null |
from datetime import datetime
import os
from django.db import models
from django.db.models import Q
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import redirect_to_login
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.http import HttpResponseNotFound
from django.template import loader, RequestContext
from django.template.defaultfilters import slugify
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.translation import ugettext_lazy as _
from django.views.generic.list_detail import object_list
from django.views.generic.create_update import delete_object
from survey.forms import forms_for_survey, SurveyForm, QuestionForm, ChoiceForm
from survey.models import Survey, Answer, Question, Choice
def _survey_redirect(request, survey,
group_slug=None, group_slug_field=None, group_qs=None,
template_name = 'survey/thankyou.html',
extra_context=None,
*args, **kw):
"""
    Conditionally redirect to the appropriate page:
    if there is a "next" value in the GET URL parameters,
    go to the URL it specifies.
    If no "next" URL is specified, go to the survey results page,
    but only if it is viewable by the user.
    Otherwise, direct the user to a page showing their own survey
    answers, assuming they have answered any questions.
    If all else fails, go to the Thank You page.
"""
if ('next' in request.REQUEST and
request.REQUEST['next'].startswith('http:') and
request.REQUEST['next'] != request.path):
return HttpResponseRedirect(request.REQUEST['next'])
if survey.answers_viewable_by(request.user):
return HttpResponseRedirect(reverse('survey-results', None, (),
{'survey_slug': survey.slug}))
# For this survey, have they answered any questions?
if (hasattr(request, 'session') and Answer.objects.filter(
session_key=request.session.session_key.lower(),
question__survey__visible=True,
question__survey__slug=survey.slug).count()):
return HttpResponseRedirect(
reverse('answers-detail', None, (),
{'survey_slug': survey.slug,
'key': request.session.session_key.lower()}))
# go to thank you page
return render_to_response(template_name,
{'survey': survey, 'title': _('Thank You')},
context_instance=RequestContext(request))
def survey_detail(request, survey_slug,
group_slug=None, group_slug_field=None, group_qs=None,
template_name = 'survey/survey_detail.html',
extra_context=None,
allow_edit_existing_answers=False,
*args, **kw):
"""
"""
survey = get_object_or_404(Survey.objects.filter(visible=True), slug=survey_slug)
if survey.closed:
if survey.answers_viewable_by(request.user):
return HttpResponseRedirect(reverse('survey-results', None, (),
{'survey_slug': survey_slug}))
raise Http404 #(_('Page not found.')) # unicode + exceptions = bad
    # if the user has a session, has answered some questions,
    # and the survey does not accept multiple answers,
    # redirect to their answers or to the thank-you page
if (hasattr(request, 'session') and
survey.has_answers_from(request.session.session_key) and
not survey.allows_multiple_interviews and not allow_edit_existing_answers):
return _survey_redirect(request, survey,group_slug=group_slug)
    # if the survey is restricted to authenticated users, redirect
    # anonymous users to the login page
if survey.restricted and str(request.user) == "AnonymousUser":
return HttpResponseRedirect(reverse("auth_login")+"?next=%s" % request.path)
if request.POST and not hasattr(request, 'session'):
return HttpResponse(unicode(_('Cookies must be enabled.')), status=403)
if hasattr(request, 'session'):
skey = 'survey_%d' % survey.id
request.session[skey] = (request.session.get(skey, False) or
request.method == 'POST')
request.session.modified = True ## enforce the cookie save.
survey.forms = forms_for_survey(survey, request, allow_edit_existing_answers)
if (request.POST and all(form.is_valid() for form in survey.forms)):
for form in survey.forms:
form.save()
return _survey_redirect(request, survey,group_slug=group_slug)
    # Render either 'survey.template_name', if that attribute is set,
    # or the default template
return render_to_response(survey.template_name or template_name,
{'survey': survey,
'title': survey.title,
'group_slug': group_slug},
context_instance=RequestContext(request))
# TODO: ajaxify this page (jquery): add a date picker, ...
# TODO: Fix the bug that makes the questions and the choices unordered
@login_required()
def survey_edit(request,survey_slug,
group_slug=None, group_slug_field=None, group_qs=None,
template_name = "survey/survey_edit.html",
extra_context=None,
*args, **kw):
survey = get_object_or_404(Survey, slug=survey_slug)
return render_to_response(template_name,
{'survey': survey,
'group_slug': group_slug},
context_instance=RequestContext(request))
# TODO: Refactor object creation to avoid code duplication.
# def object_add(request, object, form, template_name,
# post_create_redirect, extra_context):
@login_required()
def survey_add(request,
group_slug=None, group_slug_field=None, group_qs=None,
template_name = 'survey/survey_add.html',
extra_context=None,
*args, **kw):
if request.method == "POST":
request_post = request.POST.copy()
survey_form = SurveyForm(request_post)
if survey_form.is_valid():
new_survey = survey_form.save(commit=False)
new_survey.created_by = request.user
new_survey.editable_by = request.user
new_survey.slug = slugify(new_survey.title)
if group_slug:
group = get_object_or_404(group_qs,slug=group_slug)
new_survey.recipient = group
new_survey.save()
return HttpResponseRedirect(reverse("surveys-editable",kwargs={}))
else:
survey_form = SurveyForm()
return render_to_response(template_name,
{'title': _("Add a survey"),
'form' : survey_form},
context_instance=RequestContext(request))
@login_required()
def survey_update(request, survey_slug,
group_slug=None, group_slug_field=None, group_qs=None,
template_name = 'survey/survey_add.html',
extra_context=None,
*args, **kw):
if request.method == "POST":
request_post = request.POST.copy()
survey = get_object_or_404(Survey, slug=survey_slug)
survey_form = SurveyForm(instance=survey,data=request_post)
if survey_form.is_valid():
new_survey = survey_form.save(commit=False)
new_survey.created_by = request.user
new_survey.editable_by = request.user
new_survey.slug = slugify(new_survey.title)
new_survey.save()
return HttpResponseRedirect(reverse("survey-edit",None,(),{"survey_slug":survey_slug}))
else:
survey = get_object_or_404(Survey, slug=survey_slug)
survey_form = SurveyForm(instance=survey)
return render_to_response(template_name,
{'title': _("Update '%s'") % survey.title,
'survey' : survey,
'form' : survey_form},
context_instance=RequestContext(request))
@login_required()
def survey_delete(request,survey_slug=None,
group_slug=None, group_slug_field=None,
group_qs=None,
template_name = "survey/editable_survey_list.html",
extra_context=None,
*args, **kw):
    # TRICK: The following line has no logical explanation other than
    # working around a bug in Firefox. It was suggested here:
# http://groups.google.com/group/django-users/browse_thread/thread/e6c96ab0538a544e/0e01cdda3668dfce#0e01cdda3668dfce
request_post = request.POST.copy()
return delete_object(request, slug=survey_slug,
**{"model":Survey,
"post_delete_redirect": reverse("surveys-editable",kwargs={}),
"template_object_name":"survey",
"login_required": True,
'extra_context': {'title': _('Delete survey')}
})
@login_required()
def question_add(request,survey_slug,
group_slug=None, group_slug_field=None,
group_qs=None,
template_name = 'survey/question_add.html',
extra_context=None,
*args, **kw):
survey = get_object_or_404(Survey, slug=survey_slug)
if request.method == "POST":
request_post = request.POST.copy()
question_form = QuestionForm(data=request_post,files=request.FILES)
if question_form.is_valid():
new_question = question_form.save(commit=False)
new_question.survey = survey
new_question.save()
return HttpResponseRedirect(reverse("survey-edit",None,(),
{"survey_slug":survey_slug}))
else:
question_form = QuestionForm()
return render_to_response(template_name,
{'title': _("Add a question"),
'form' : question_form},
context_instance=RequestContext(request))
@login_required()
def question_update(request,survey_slug,question_id,
group_slug=None, group_slug_field=None,
group_qs=None,
template_name = 'survey/question_add.html',
extra_context=None,
*args, **kw):
survey = get_object_or_404(Survey, slug=survey_slug)
question = get_object_or_404(Question,id=question_id)
if question not in survey.questions.iterator():
raise Http404()
if request.method == "POST":
request_post = request.POST.copy()
question_form = QuestionForm(instance=question,data=request_post,
files=request.FILES)
if question_form.is_valid():
new_question = question_form.save(commit=False)
new_question.survey = survey
new_question.save()
return HttpResponseRedirect(reverse("survey-edit",None,(),
{"survey_slug":survey_slug}))
else:
question_form = QuestionForm(instance=question)
return render_to_response(template_name,
{'title': _("Update question"),
'question' : question,
'model_string' : "Question",
'form' : question_form},
context_instance=RequestContext(request))
@login_required()
def question_delete(request,survey_slug,question_id,
group_slug=None, group_slug_field=None,
group_qs=None,
template_name = None,
extra_context=None,
*args, **kw):
    # TRICK: The following line has no logical explanation other than
    # working around a bug in Firefox. It was suggested here:
# http://groups.google.com/group/django-users/browse_thread/thread/e6c96ab0538a544e/0e01cdda3668dfce#0e01cdda3668dfce
request_post = request.POST.copy()
return delete_object(request, object_id=question_id,
**{"model":Question,
"post_delete_redirect": reverse("survey-edit",None,(),
{"survey_slug":survey_slug,
"group_slug":group_slug}),
"template_object_name":"question",
"login_required": True,
'extra_context': {'title': _('Delete question')}
})
@login_required()
def choice_add(request,question_id,
group_slug=None, group_slug_field=None,
group_qs=None,
template_name = 'survey/choice_add.html',
extra_context=None,
*args, **kw):
question = get_object_or_404(Question, id=question_id)
if request.method == "POST":
request_post = request.POST.copy()
choice_form = ChoiceForm(data=request_post,files=request.FILES)
if choice_form.is_valid():
new_choice = choice_form.save(commit=False)
new_choice.question = question
new_choice.save()
return HttpResponseRedirect(reverse("survey-edit",None,(),
{"survey_slug":question.survey.slug}))
else:
choice_form = ChoiceForm()
return render_to_response(template_name,
{'title': _("Add a choice"),
'form' : choice_form},
context_instance=RequestContext(request))
@login_required()
def choice_update(request,question_id, choice_id,
group_slug=None, group_slug_field=None,
group_qs=None,
template_name = 'survey/choice_add.html',
extra_context=None,
*args, **kw):
question = get_object_or_404(Question, id=question_id)
choice = get_object_or_404(Choice, id=choice_id)
if choice not in question.choices.iterator():
raise Http404()
if request.method == "POST":
request_post = request.POST.copy()
choice_form = ChoiceForm(instance=choice,data=request_post,
files=request.FILES)
if choice_form.is_valid():
new_choice = choice_form.save(commit=False)
new_choice.question = question
new_choice.save()
return HttpResponseRedirect(reverse("survey-edit",None,(),
{"survey_slug":question.survey.slug}))
else:
choice_form = ChoiceForm(instance=choice)
return render_to_response(template_name,
{'title': _("Update choice"),
'choice' : choice,
'model_string' : "Choice",
'form' : choice_form},
context_instance=RequestContext(request))
@login_required()
def choice_delete(request,survey_slug,choice_id,
group_slug=None, group_slug_field=None,
group_qs=None,
template_name = 'survey/choice_add.html',
extra_context=None,
*args, **kw):
    # TRICK: The following line has no logical explanation other than
    # working around a bug in Firefox. It was suggested here:
# http://groups.google.com/group/django-users/browse_thread/thread/e6c96ab0538a544e/0e01cdda3668dfce#0e01cdda3668dfce
request_post = request.POST.copy()
return delete_object(request, object_id=choice_id,
**{"model":Choice,
"post_delete_redirect": reverse("survey-edit",None,(),
{"survey_slug":survey_slug}),
"template_object_name":"choice",
"login_required": True,
'extra_context': {'title': _('Delete choice')}
})
def visible_survey_list(request,
group_slug=None, group_slug_field=None, group_qs=None,
login_required = False,
template_name = "survey/survey_list.html",
extra_context=None,
*args, **kw):
login_user= request.user
if login_required and not login_user.is_authenticated():
return redirect_to_login(request.path)
else:
return object_list(request,
**{ 'queryset': Survey.objects.filter(visible=True),
'allow_empty': True,
'template_name':template_name,
'extra_context': {'title': _('Surveys')}}
)
@login_required()
def editable_survey_list(request,
group_slug=None, group_slug_field=None, group_qs=None,
template_name = "survey/editable_survey_list.html",
extra_context=None,
*args, **kw):
login_user= request.user
return object_list(request,
**{ 'queryset': Survey.objects.filter(Q(created_by=login_user) |
Q(editable_by=login_user)),
'allow_empty': True,
'template_name':template_name,
'extra_context': {'title': _('Surveys'),
'group_slug': group_slug
}
})
def answers_list(request, survey_slug,
group_slug=None, group_slug_field=None, group_qs=None,
template_name = 'survey/answers_list.html',
extra_context=None,
*args, **kw):
"""
    Shows a page with the results for an entire survey.
"""
survey = get_object_or_404(Survey.objects.filter(visible=True), slug=survey_slug)
# if the user lacks permissions, show an "Insufficient Permissions page"
if not survey.answers_viewable_by(request.user):
if (hasattr(request, 'session') and
survey.has_answers_from(request.session.session_key)):
return HttpResponseRedirect(
reverse('answers-detail', None, (),
{'survey_slug': survey.slug,
'key': request.session.session_key.lower()}))
return HttpResponse(unicode(_('Insufficient Privileges.')), status=403)
return render_to_response(template_name,
{ 'survey': survey,
'view_submissions': request.user.has_perm('survey.view_submissions'),
'title': survey.title + u' - ' + unicode(_('Results'))},
context_instance=RequestContext(request))
def answers_detail(request, survey_slug, key,
group_slug=None, group_slug_field=None, group_qs=None,
template_name = 'survey/answers_detail.html',
extra_context=None,
*args, **kw):
"""
Shows a page with survey results for a single person.
    If the user lacks permissions, shows an "Insufficient Permissions" page.
"""
answers = Answer.objects.filter(session_key=key.lower(),
question__survey__visible=True, question__survey__slug=survey_slug)
if not answers.count(): raise Http404
survey = answers[0].question.survey
mysubmission = (hasattr(request, 'session') and
request.session.session_key.lower() == key.lower())
if (not mysubmission and
(not request.user.has_perm('survey.view_submissions') or
not survey.answers_viewable_by(request.user))):
return HttpResponse(unicode(_('Insufficient Privileges.')), status=403)
return render_to_response(template_name,
{'survey': survey, 'submission': answers,
'title': survey.title + u' - ' + unicode(_('Submission'))},
context_instance=RequestContext(request))
def delete_image(request, model_string,object_id):
model = models.get_model("survey", model_string)
object = get_object_or_404(model, id=object_id)
if object.image == None:
raise Http404('No image for the given object : %s ' %object)
if request.method == "POST":
request_post = request.POST.copy()
if os.path.isfile(object.get_image_filename()):
os.remove(object.get_image_filename())
object.image = None
object.save()
return HttpResponseRedirect(object.get_update_url())
return render_to_response('survey/image_confirm_delete.html',
{"object" : object},
context_instance=RequestContext(request))
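The views above rely on named URL patterns ('surveys-editable', 'survey-edit', 'survey-results', 'answers-detail', ...) via reverse(). The URLconf is not part of this file; the following is only a sketch of how it might look for the Django 1.x era these imports target, with the regexes and the 'survey-detail' name being assumptions:
from django.conf.urls.defaults import patterns, url  # old-style Django 1.x API (assumed)
from survey import views

urlpatterns = patterns('',
    url(r'^editable/$', views.editable_survey_list, name='surveys-editable'),
    url(r'^(?P<survey_slug>[-\w]+)/$', views.survey_detail, name='survey-detail'),
    url(r'^(?P<survey_slug>[-\w]+)/edit/$', views.survey_edit, name='survey-edit'),
    url(r'^(?P<survey_slug>[-\w]+)/results/$', views.answers_list, name='survey-results'),
    url(r'^(?P<survey_slug>[-\w]+)/answers/(?P<key>\w+)/$', views.answers_detail, name='answers-detail'),
)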
| 44.253191 | 121 | 0.60926 |
4a05eb3f34e6f5cf2523239c05d9ce4ebb1b0b5d | 199,126 | py | Python | python/cudf/cudf/tests/test_dataframe.py | marlenezw/cudf | f6e14facc37fa270d302a8e1c39abffb6675c53e | ["Apache-2.0"] | 1 | 2021-03-01T05:41:41.000Z | 2021-03-01T05:41:41.000Z | python/cudf/cudf/tests/test_dataframe.py | marlenezw/cudf | f6e14facc37fa270d302a8e1c39abffb6675c53e | ["Apache-2.0"] | null | null | null | python/cudf/cudf/tests/test_dataframe.py | marlenezw/cudf | f6e14facc37fa270d302a8e1c39abffb6675c53e | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2018-2020, NVIDIA CORPORATION.
import array as arr
import io
import operator
import random
import re
import textwrap
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf as gd
from cudf.core.column import column
from cudf.core.dataframe import DataFrame, Series
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = DataFrame(data)
assert_eq(pdf, gdf)
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = DataFrame(data)
assert_eq(pdf, gdf, check_like=True)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = gd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = gd.Series(dict_of_series[key])
gdf = gd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = gd.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = gd.DataFrame(
{
"a": gd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": gd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = Series([data_np[0], data_np[2]])
s_cp = Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = DataFrame()
gdf["a"] = a
gdf["b"] = b
    # Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = Series()
got = sr1.to_string()
print(got)
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = Series(None)
got = sr2.to_string()
print(got)
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = gd.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
print(expect)
print(mat)
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
def test_dataframe_drop_method():
df = DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
df["c"] = data
assert tuple(df.columns) == ("a", "b", "c")
assert tuple(df.drop("a").columns) == ("b", "c")
assert tuple(df.drop("a", axis=1).columns) == ("b", "c")
assert tuple(df.columns) == ("a", "b", "c")
assert tuple(df.drop(["a", "b"]).columns) == ("c",)
assert tuple(df.drop(["a", "a", "b"]).columns) == ("c",)
assert tuple(df.columns) == ("a", "b", "c")
assert tuple(df.drop(["a", "b"]).columns) == ("c",)
assert tuple(df.columns) == ("a", "b", "c")
assert tuple(df.drop(columns=["a", "b"]).columns) == ("c",)
assert tuple(df.columns) == ("a", "b", "c")
assert tuple(df.drop(columns="a").columns) == ("b", "c")
assert tuple(df.columns) == ("a", "b", "c")
assert tuple(df.drop(columns=["a"]).columns) == ("b", "c")
assert tuple(df.columns) == ("a", "b", "c")
assert tuple(df.drop(columns=["a", "b", "c"]).columns) == tuple()
assert tuple(df.columns) == ("a", "b", "c")
# Test drop error
with pytest.raises(NameError) as raises:
df.drop("d")
raises.match("column 'd' does not exist")
with pytest.raises(NameError) as raises:
df.drop(["a", "d", "b"])
raises.match("column 'd' does not exist")
with pytest.raises(ValueError) as raises:
df.drop("a", axis=1, columns="a")
raises.match("Cannot specify both")
with pytest.raises(ValueError) as raises:
df.drop(axis=1)
raises.match("Need to specify at least")
def test_dataframe_column_add_drop_via_setitem():
df = DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
got = gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas(nullable_pd_dtype=False)
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
gdf = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
rename_mapper = {"a": "z", "b": "z", "c": "z"}
expect = DataFrame({"z": [1, 2, 3], "z_1": [4, 5, 6], "z_2": [7, 8, 9]})
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]})
string = str(df)
print(string)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
print(string)
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]})
data = np.arange(6)
mask = np.zeros(1, dtype=gd.utils.utils.mask_dtype)
mask[0] = 0b00101101
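    # 0b00101101 has bits 0, 2, 3 and 5 set (counting from the least significant bit),
    # so rows 0, 2, 3 and 5 remain valid and rows 1 and 4 become null (null_count == 2).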
masked = Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is None
pd.options.display.max_rows = 10
got = df.to_string()
print(got)
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide():
# Test basic
df = DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
print(got)
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = DataFrame()
got = df.to_string()
print(got)
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
print(got)
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
from copy import copy
df = DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
print(got)
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
print(got)
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = DataFrame({k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()})
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_assign():
gdf = DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas(nullable_pd_dtype=False)
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = (
row.key + 100 if valid else np.iinfo(gdf["val"].dtype).min
)
got_value = row.val
assert expected_value == got_value
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas(nullable_pd_dtype=False)
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = (
row.val - 100 if valid else np.iinfo(gdf["val"].dtype).min
)
got_value = row.key
assert expected_value == got_value
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(ValueError):
gd.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = gd.concat([gd.from_pandas(df1), gd.from_pandas(df2)])
assert_eq(gd.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = gd.Series([42], dtype=np.float)
df2 = gd.Series(["a"], dtype="category")
with pytest.raises(ValueError):
gd.concat([df1, df2])
df2 = gd.Series(["a string"])
with pytest.raises(TypeError):
gd.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [DataFrame({"a": [1, 2], "b": [1, 3]}), DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [DataFrame({"a": [], "b": []}), DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = gd.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = gd.concat(
[DataFrame(df1_d), DataFrame(df2_d), DataFrame(df1_d)], sort=False
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # numerical columns are upcast to float in cudf.DataFrame.to_pandas(), which
    # casts NaN to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize("ser_1", [pd.Series([1, 2, 3]), pd.Series([])])
@pytest.mark.parametrize("ser_2", [pd.Series([])])
def test_concat_empty_series(ser_1, ser_2):
got = gd.concat([Series(ser_1), Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = gd.from_pandas(df1)
cdf2 = gd.from_pandas(df2)
# concat only dataframes
concat_cdf = gd.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = gd.Series.from_pandas(df1.x)
cs2 = gd.Series.from_pandas(df1.y)
concat_cdf_s = gd.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = gd.Series.from_pandas(s3)
concat_cdf_all = gd.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = gd.from_pandas(df1)
midf1.index = gd.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = gd.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas(nullable_pd_dtype=False)
mipdf2 = midf2.to_pandas(nullable_pd_dtype=False)
assert_eq(gd.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(gd.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
gd.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = gd.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas(nullable_pd_dtype=False)
pdg2 = gdg2.to_pandas(nullable_pd_dtype=False)
assert_eq(gd.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(gd.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas(nullable_pd_dtype=False)
pdgz2 = gdgz2.to_pandas(nullable_pd_dtype=False)
assert_eq(gd.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(gd.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = gd.DataFrame.from_pandas(df)
assert isinstance(gdf, gd.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = gd.Series.from_pandas(s)
assert isinstance(gs, gd.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = gd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, gd.DataFrame)
assert_eq(df, gdf)
gdf = gd.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, gd.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = gd.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, gd.DataFrame)
assert_eq(df, gdf)
def test_from_gpu_matrix():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = gd.DataFrame.from_gpu_matrix(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, gd.DataFrame)
assert_eq(df, gdf)
gdf = gd.DataFrame.from_gpu_matrix(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, gd.DataFrame)
assert_eq(df, gdf)
gdf = gd.DataFrame.from_gpu_matrix(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, gd.DataFrame)
assert_eq(df, gdf)
gdf = gd.DataFrame.from_gpu_matrix(d_ary, index=0)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, gd.DataFrame)
assert_eq(df, gdf)
gdf = gd.DataFrame.from_gpu_matrix(d_ary, index=1)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, gd.DataFrame)
assert_eq(df, gdf)
def test_from_gpu_matrix_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="matrix dimension expected 2 but found 3"
):
gd.DataFrame.from_gpu_matrix(d_ary)
def test_from_gpu_matrix_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError, match="index length expected 2 but found 1"
):
gd.DataFrame.from_gpu_matrix(d_ary, index=["a"])
with pytest.raises(KeyError):
gd.DataFrame.from_gpu_matrix(d_ary, index="a")
@pytest.mark.xfail(reason="constructor does not coerce index inputs")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = gd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = gd.DataFrame.from_arrow(padf)
assert isinstance(gdf, gd.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = gd.Series.from_arrow(s)
assert isinstance(gs, gd.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = gd.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = gd.Series.from_arrow(s1)
assert isinstance(gs1, gd.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = gd.Series.from_arrow(s2)
assert isinstance(gs2, gd.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = gd.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = gd.Series(pa_cat)
assert isinstance(gd_cat, gd.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = gd.Series(pa_cat)
assert isinstance(gd_cat, gd.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
from datetime import date
scalar = np.datetime64(date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = gd.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = gd.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series()
cs = Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
from string import ascii_lowercase
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
from string import ascii_lowercase
for i in range(num_cols):
colname = ascii_lowercase[i]
data = pd.Series(list(ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return gd.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
    pdf = pd.DataFrame(data=data)
    gdf = DataFrame.from_pandas(pdf)
    assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = DataFrame(data)
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = gd.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, gd.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
def test_quantile(pdf, gdf, q):
assert_eq(pdf["x"].quantile(q), gdf["x"].quantile(q))
assert_eq(pdf.quantile(q), gdf.quantile(q))
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = gd.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = gd.from_pandas(pdf)
assert isinstance(gdf, gd.DataFrame)
assert_eq(pdf, gdf)
gdf = gd.from_pandas(pdf.x)
assert isinstance(gdf, gd.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
gd.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's str hash is randomized between runs, which can occasionally
    # make enc_with_name_arr identical to enc_arr. Using an integer name
    # keeps the hash deterministic, so the two encodings stay distinct.
s = Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, gd.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
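    # build the same values on host (NumPy) and device (CuPy); constructing a
    # cudf.Series from the CuPy array exercises the __cuda_array_interface__
    # protocol this test targets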
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = gd.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = gd.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
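    # a chunked Arrow array is expected to be flattened into a single
    # contiguous cudf column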
expect = pd.Series(pa_chunk_array.to_pandas())
got = gd.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = gd.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
import cudf
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
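    # after the boolean mask is applied, the frame and each of its columns
    # should share the same underlying index buffer, and overall GPU memory
    # usage should match the pre-filter measurement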
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = DataFrame.from_pandas(pdf)
gdf_mask = DataFrame.from_pandas(pdf_mask)
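    # masking with a boolean DataFrame follows pandas alignment: positions
    # that are False or not covered by the mask come back as missing values,
    # hence the fillna(-1) comparison below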
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas(nullable_pd_dtype=False)
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_series_digitize(num_rows, num_bins, right, dtype):
data = np.random.randint(0, 100, num_rows).astype(dtype)
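    # np.digitize expects monotonic bin edges, so the random bins are sorted
    # and deduplicated before comparing against Series.digitize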
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = Series(data)
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
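    # slicing columns out of this row-major block yields pandas columns with
    # non-contiguous memory; from_pandas must still copy them correctly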
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = gd.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast the pandas expectation to float64 because a Series built only from
    # None values would otherwise have object dtype
expect = pd.Series(data).astype("float64")
got = Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = gd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = DataFrame()
gdf["id"] = Series(["a", "b"], dtype=np.object)
gdf["v"] = Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas(nullable_pd_dtype=False).tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index(pdf, gdf, drop):
for col in pdf.columns:
assert_eq(pdf.set_index(col, drop=drop), gdf.set_index(col, drop=drop))
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = gd.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = gd.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = gd.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = gd.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = gd.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = gd.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = gd.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = gd.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = gd.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = gd.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = gd.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = gd.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
    # Validate reindexing against a DatetimeIndex: both the new index labels
    # (changing the index dtype) and the column labels should be applied
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = gd.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = gd.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = gd.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas(nullable_pd_dtype=False)
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = DataFrame.from_pandas(pdf)
    # ignore_index is not passed to pandas sort_index here; it is emulated
    # below by resetting the index on the expected result
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = Series(data, nan_as_null=nan_as_null).to_arrow()
    # Work around a NaN-handling bug in Arrow 0.14.1 by comparing as int64
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = gd.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = gd.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
with pytest.raises(TypeError):
assert_eq(
pdf.select_dtypes(include=["Foo"]),
gdf.select_dtypes(include=["Foo"]),
)
with pytest.raises(ValueError):
gdf.select_dtypes(exclude=np.number, include=np.number)
with pytest.raises(ValueError):
pdf.select_dtypes(exclude=np.number, include=np.number)
gdf = DataFrame({"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]})
pdf = gdf.to_pandas(nullable_pd_dtype=False)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = gd.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas(nullable_pd_dtype=False)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
with pytest.raises(ValueError):
pdf.select_dtypes()
with pytest.raises(ValueError):
gdf.select_dtypes()
gdf = gd.DataFrame(
{"a": gd.Series([], dtype="int"), "b": gd.Series([], dtype="str")}
)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = gd.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
assert_eq(gdf[["timestamp"]], gdf.select_dtypes("datetime64"))
assert_eq(gdf[["timestamp"]], gdf.select_dtypes(np.dtype("datetime64")))
assert_eq(gdf[["timestamp"]], gdf.select_dtypes(include="datetime64"))
assert_eq(gdf[["timestamp"]], gdf.select_dtypes("datetime64[ms]"))
assert_eq(
gdf[["timestamp"]], gdf.select_dtypes(np.dtype("datetime64[ms]"))
)
assert_eq(gdf[["timestamp"]], gdf.select_dtypes(include="datetime64[ms]"))
def test_array_ufunc():
gdf = gd.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
def test_series_describe_numeric(dtype):
pdf = pd.Series([0, 1, 2, 3])
gdf = Series.from_pandas(pdf).astype(dtype)
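    # the pandas expectation is built from the cast cudf series so that both
    # sides describe values of the same dtype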
gdf_results = gdf.describe().to_pandas()
pdf_results = gdf.to_pandas().describe()
np.testing.assert_array_almost_equal(
gdf_results.values, pdf_results.values, decimal=4
)
@pytest.mark.xfail(
raises=NotImplementedError,
reason="Describing non-numeric columns is not yet supported.",
)
def test_series_describe_datetime():
pdf = pd.Series([0, 1, 2, 3]).astype("datetime64[ms]")
gdf = Series.from_pandas(pdf)
gdf_results = gdf.describe()
pdf_results = pdf.describe()
np.testing.assert_array_almost_equal(
gdf_results.values, pdf_results.values, decimal=4
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"]).to_pandas()
pdf_results = pdf.describe(exclude=["float"])
np.testing.assert_array_almost_equal(
gdf_results.values, pdf_results.values, decimal=4
)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"]).to_pandas()
pdf_results = pdf.describe(include=["int"])
np.testing.assert_array_almost_equal(
gdf_results.values, pdf_results.values, decimal=4
)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe().to_pandas()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
@pytest.mark.xfail(
raises=AssertionError,
reason="Describing non-numeric columns is not yet supported.",
)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all").to_pandas()
pdf_results = pdf.describe(include="all")
np.testing.assert_array_almost_equal(
gdf_results.values, pdf_results.values, decimal=4
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles).to_pandas()
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = gd.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = DataFrame({"a": Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = DataFrame({"a": Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
def test_isnull_isna():
# float & strings some missing
ps = pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
}
)
ps.index = ["q", "w", "e", "r", "t", "y", "u"]
gs = DataFrame.from_pandas(ps)
assert_eq(ps.a.isnull(), gs.a.isnull())
assert_eq(ps.isnull(), gs.isnull())
assert_eq(ps.a.isna(), gs.a.isna())
assert_eq(ps.isna(), gs.isna())
# integer & string none missing
ps = pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]})
gs = DataFrame.from_pandas(ps)
assert_eq(ps.a.isnull(), gs.a.isnull())
assert_eq(ps.isnull(), gs.isnull())
assert_eq(ps.a.isna(), gs.a.isna())
assert_eq(ps.isna(), gs.isna())
# all missing
ps = pd.DataFrame(
{"a": [None, None, np.nan, None], "b": [np.nan, None, np.nan, None]}
)
gs = DataFrame.from_pandas(ps)
assert_eq(ps.a.isnull(), gs.a.isnull())
assert_eq(ps.isnull(), gs.isnull())
assert_eq(ps.a.isna(), gs.a.isna())
assert_eq(ps.isna(), gs.isna())
# empty
ps = pd.DataFrame({"a": []})
gs = DataFrame.from_pandas(ps)
assert_eq(ps.a.isnull(), gs.a.isnull())
assert_eq(ps.isnull(), gs.isnull())
assert_eq(ps.a.isna(), gs.a.isna())
assert_eq(ps.isna(), gs.isna())
# one missing
ps = pd.DataFrame({"a": [np.nan], "b": [None]})
gs = DataFrame.from_pandas(ps)
assert_eq(ps.a.isnull(), gs.a.isnull())
assert_eq(ps.isnull(), gs.isnull())
assert_eq(ps.a.isna(), gs.a.isna())
assert_eq(ps.isna(), gs.isna())
# strings missing
ps = pd.DataFrame({"a": ["a", "b", "c", None, "e"]})
gs = DataFrame.from_pandas(ps)
assert_eq(ps.a.isnull(), gs.a.isnull())
assert_eq(ps.isnull(), gs.isnull())
assert_eq(ps.a.isna(), gs.a.isna())
assert_eq(ps.isna(), gs.isna())
# strings none missing
ps = pd.DataFrame({"a": ["a", "b", "c", "d", "e"]})
gs = DataFrame.from_pandas(ps)
assert_eq(ps.a.isnull(), gs.a.isnull())
assert_eq(ps.isnull(), gs.isnull())
assert_eq(ps.a.isna(), gs.a.isna())
assert_eq(ps.isna(), gs.isna())
# unnamed series
ps = pd.Series([0, 1, 2, np.nan, 4, None, 6])
gs = Series.from_pandas(ps)
assert_eq(ps.isnull(), gs.isnull())
assert_eq(ps.isna(), gs.isna())
def test_notna_notnull():
# float & strings some missing
ps = pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
}
)
gs = DataFrame.from_pandas(ps)
assert_eq(ps.notna(), gs.notna())
assert_eq(ps.a.notna(), gs.a.notna())
assert_eq(ps.notnull(), gs.notnull())
assert_eq(ps.a.notnull(), gs.a.notnull())
# integer & string none missing
ps = pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]})
gs = DataFrame.from_pandas(ps)
assert_eq(ps.notna(), gs.notna())
assert_eq(ps.a.notna(), gs.a.notna())
assert_eq(ps.notnull(), gs.notnull())
assert_eq(ps.a.notnull(), gs.a.notnull())
# all missing
ps = pd.DataFrame(
{"a": [None, None, np.nan, None], "b": [np.nan, None, np.nan, None]}
)
gs = DataFrame.from_pandas(ps)
assert_eq(ps.notna(), gs.notna())
assert_eq(ps.a.notna(), gs.a.notna())
assert_eq(ps.notnull(), gs.notnull())
assert_eq(ps.a.notnull(), gs.a.notnull())
# empty
ps = pd.DataFrame({"a": []})
gs = DataFrame.from_pandas(ps)
assert_eq(ps.notna(), gs.notna())
assert_eq(ps.a.notna(), gs.a.notna())
assert_eq(ps.notnull(), gs.notnull())
assert_eq(ps.a.notnull(), gs.a.notnull())
# one missing
ps = pd.DataFrame({"a": [np.nan], "b": [None]})
gs = DataFrame.from_pandas(ps)
assert_eq(ps.notna(), gs.notna())
assert_eq(ps.a.notna(), gs.a.notna())
assert_eq(ps.notnull(), gs.notnull())
assert_eq(ps.a.notnull(), gs.a.notnull())
# strings missing
ps = pd.DataFrame({"a": ["a", "b", "c", None, "e"]})
gs = DataFrame.from_pandas(ps)
assert_eq(ps.notna(), gs.notna())
assert_eq(ps.a.notna(), gs.a.notna())
assert_eq(ps.notnull(), gs.notnull())
assert_eq(ps.a.notnull(), gs.a.notnull())
# strings none missing
ps = pd.DataFrame({"a": ["a", "b", "c", "d", "e"]})
gs = DataFrame.from_pandas(ps)
assert_eq(ps.notna(), gs.notna())
assert_eq(ps.a.notna(), gs.a.notna())
assert_eq(ps.notnull(), gs.notnull())
assert_eq(ps.a.notnull(), gs.a.notnull())
# unnamed series
ps = pd.Series([0, 1, 2, np.nan, 4, None, 6])
gs = Series.from_pandas(ps)
assert_eq(ps.notna(), gs.notna())
assert_eq(ps.notnull(), gs.notnull())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series()
gs = Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"arr",
[
np.random.normal(-100, 100, 1000),
np.random.randint(-50, 50, 1000),
np.zeros(100),
np.repeat([-0.6459412758761901], 100),
np.repeat(np.nan, 100),
np.array([1.123, 2.343, np.nan, 0.0]),
],
)
@pytest.mark.parametrize(
"decimal",
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
pytest.param(
-1,
marks=[
pytest.mark.xfail(reason="NotImplementedError: decimals < 0")
],
),
],
)
def test_round(arr, decimal):
pser = pd.Series(arr)
ser = Series(arr)
result = ser.round(decimal)
expected = pser.round(decimal)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
arr = arr.astype("float64") # for pandas nulls
mask = np.random.randint(0, 2, arr.shape[0])
arr[mask == 1] = np.nan
pser = pd.Series(arr)
ser = Series(arr)
result = ser.round(decimal)
expected = pser.round(decimal)
assert_eq(result, expected)
    # the null mask of the input should be carried through round() unchanged
    assert np.array_equal(
        ser.nullmask.to_array(), result.nullmask.to_array()
    )
@pytest.mark.parametrize(
"series",
[
Series([1.0, None, np.nan, 4.0], nan_as_null=False),
Series([1.24430, None, np.nan, 4.423530], nan_as_null=False),
Series([1.24430, np.nan, 4.423530], nan_as_null=False),
Series([-1.24430, np.nan, -4.423530], nan_as_null=False),
Series(np.repeat(np.nan, 100)),
],
)
@pytest.mark.parametrize("decimal", [0, 1, 2, 3])
def test_round_nan_as_null_false(series, decimal):
pser = series.to_pandas()
ser = Series(series)
result = ser.round(decimal)
expected = pser.round(decimal)
np.testing.assert_array_almost_equal(
result.to_pandas(), expected, decimal=10
)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = pd.Series(data).replace([None], False)
gdata = Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = pd.Series(data)
gdata = Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = gd.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
    # each column should report a nonzero footprint of its own
    for c in gdf._data.columns:
        assert c.__sizeof__() > 0
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = DataFrame.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
from cudf.core.column import column
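    # with no dtype given, an empty input is expected to default to float64;
    # explicit dtype arguments should override that inference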
col = column.as_column(Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = Series(col)
pds = pd.Series(pd.Series([]))
assert_eq(pds, gds)
col = column.as_column(Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([])
gds = Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = Series(column.as_column(Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = Series(column.as_column(Series([1.2, 18.0, 9.0]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = Series(column.as_column(Series([1.2, 18.0, 9.0]), dtype="str"))
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = Series(gd.core.index.StringIndex(["1", "18", "9"]), dtype="int")
assert_eq(pds, gds)
def test_one_row_head():
gdf = DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas(nullable_pd_dtype=False)
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = gd.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = gd.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = gd.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = gd.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = gd.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = gd.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = Series([base_date], dtype=dtype)
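    # casting to str should render the timestamp at the resolution implied by
    # the dtype (ns/us/ms/s), which is what `expect` encodes above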
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = gd.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = gd.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = gd.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = gd.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = gd.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = gd.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(gd.Series(data, dtype="str"), gd.Series(data).astype("str"))
assert_eq(
gd.Series(data, dtype="category"), gd.Series(data).astype("category")
)
assert_eq(
gd.Series(data, dtype="float32"),
gd.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
gd.Series(data, dtype="float32"),
gd.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
gd.Series(data, dtype="datetime64[ms]"),
gd.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
gd.Series(data, dtype="str"),
gd.Series(data, dtype="category").astype("str"),
)
assert_eq(
gd.Series(data, dtype="float32"),
gd.Series(data, dtype="category").astype("float32"),
)
assert_eq(
gd.Series(data, dtype="datetime64[ms]"),
gd.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
gd.Series([1, 2, None, 3], dtype="int32"),
gd.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
gd.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
gd.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
gd.Series(["a", "b", "c", None], dtype="category").to_pandas(),
gd.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
Series(data), Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
gd.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = gd.Series([None, None, None], dtype="category")
expect = gd.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = pd.Series(data)
gds = Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = pd.Series(data)
gds = Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(pd.date_range("2010-01-01", "2010-02-01")),
pd.Series([None, None], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_datetime_value_counts(data, nulls):
psr = pd.Series(data)
if len(data) > 0:
if nulls == "one":
p = np.random.randint(0, len(data))
psr[p] = None
elif nulls == "some":
p = np.random.randint(0, len(data), 2)
psr[p] = None
gsr = Series.from_pandas(psr)
expected = psr.value_counts()
got = gsr.value_counts()
pandas_dict = expected.to_dict()
gdf_dict = got.to_pandas().to_dict()
assert pandas_dict == gdf_dict
@pytest.mark.parametrize("num_elements", [10, 100, 1000])
def test_categorical_value_counts(num_elements):
from string import ascii_letters, digits
# create categorical series
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(
np.random.choice(list(ascii_letters + digits), num_elements),
dtype="category",
)
)
# gdf
gdf = DataFrame()
gdf["a"] = Series.from_categorical(pd_cat)
gdf_value_counts = gdf["a"].value_counts()
# pandas
pdf = pd.DataFrame()
pdf["a"] = pd_cat
pdf_value_counts = pdf["a"].value_counts()
# verify
pandas_dict = pdf_value_counts.to_dict()
gdf_dict = gdf_value_counts.to_pandas().to_dict()
assert pandas_dict == gdf_dict
def test_series_value_counts():
for size in [10 ** x for x in range(5)]:
arr = np.random.randint(low=-1, high=10, size=size)
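        # entries equal to -1 are masked out as nulls via the boolean mask below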
mask = arr != -1
sr = Series.from_masked_array(arr, Series(mask).as_mask())
sr.name = "col"
df = pd.DataFrame(data=arr[mask], columns=["col"])
expect = df.col.value_counts().sort_index()
got = sr.value_counts().sort_index()
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize("ascending", [True, False])
def test_series_value_counts_optional_arguments(ascending):
psr = pd.Series([1.0, 2.0, 2.0, 3.0, 3.0, 3.0, None])
gsr = Series.from_pandas(psr)
expect = psr.value_counts(ascending=ascending)
got = gsr.value_counts(ascending=ascending)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = pd.Series(data, index=index)
gsr = Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = pd.Series(data)
gsr = Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=[
pytest.mark.xfail(
reason="pandas's failure here seems like a bug "
"given the reverse succeeds"
)
],
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = pd.Series(data)
gsr = Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = pd.Series(data)
gsr = Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = pd.Series(data)
gsr = Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = gd.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = gd.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
with pytest.raises((ValueError, TypeError)):
expected = pmdx.isin(values, level=level)
with pytest.raises(err):
got = gmdx.isin(values, level=level)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
from cudf.utils.dtypes import is_scalar
pdf = data
gdf = gd.from_pandas(pdf)
if is_scalar(values):
with pytest.raises(TypeError):
pdf.isin(values)
with pytest.raises(TypeError):
gdf.isin(values)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
# xref https://github.com/pandas-dev/pandas/issues/34256
pytest.xfail(
"https://github.com/pandas-dev/pandas/issues/34256"
)
if isinstance(values, (pd.DataFrame, pd.Series)):
values = gd.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = DataFrame()
gdf["foo"] = Series(data, dtype=dtype)
gdf["bar"] = Series(data, dtype=dtype)
insert_data = Series(data, dtype=dtype)
expect = DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = Series(data, dtype=as_dtype)
gdf = DataFrame()
expect = DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = DataFrame()
expect = DataFrame()
gdf["foo"] = Series(data, dtype="datetime64[ms]")
gdf["bar"] = Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = Series(data, dtype="str")
expect["bar"] = Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = Series(gdf["foo"], dtype="category")
expect["bar"] = Series(gdf["bar"], dtype="category")
else:
expect["foo"] = Series(data, dtype=as_dtype)
expect["bar"] = Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = gd.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = Series(data, dtype=dtype)
expect = DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
gd.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
gd.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
gd.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
pytest.param(
gd.DataFrame(),
marks=[
pytest.mark.xfail(
reason="_apply_support_method fails on empty dataframes."
)
],
),
pytest.param(
gd.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
marks=[
pytest.mark.xfail(
reason="Rowwise ops do not currently support nulls."
)
],
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops(data, op):
gdf = data
pdf = gdf.to_pandas(nullable_pd_dtype=False)
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0)
got = getattr(gdf, op)(axis=1, ddof=0)
else:
expected = getattr(pdf, op)(axis=1)
got = getattr(gdf, op)(axis=1)
assert_eq(expected, got, check_less_precise=7)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
    # pandas' insert doesn't support negative indexing, so use the
    # equivalent end position on the pandas side
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = gd.datasets.randomdata(10)
pdf = gdf.to_pandas(nullable_pd_dtype=False)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = gd.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
Series([1, 2, 3]),
Series([1, 2, 3], index=["a", "b", "c"]),
Series([1, 2, 3], index=["a", "b", "d"]),
Series([1, 2], index=["a", "b"]),
Series([1, 2, 3], index=gd.core.index.RangeIndex(0, 3)),
pytest.param(
Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas(nullable_pd_dtype=False)
expect = op(pdf, psr)
got = op(gdf, gsr)
assert_eq(expect.astype(float), got.astype(float))
expect = op(psr, pdf)
    got = op(gsr, gdf)
assert_eq(expect.astype(float), got.astype(float))
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
@pytest.mark.parametrize(
"gsr", [Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas(nullable_pd_dtype=False)
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = gd.from_pandas(df)
if index and set_index is None:
        # Special case: a default RangeIndex reports zero memory usage
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = gd.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = gd.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = gd.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = gd.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All values must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(gd.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(gd.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(gd.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = gd.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, gd.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values on
    # alignment, so typecast to float64 for the equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = gd.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
from cudf._lib.table import Table
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = Table(odict)
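    # building a DataFrame back from a Table with no index columns should not raise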
result = DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = gd.from_pandas(pd_idx1)
gd_idx2 = gd.from_pandas(pd_idx2)
gd_series1 = gd.Series([1, 2, 3], index=gd_idx1)
gd_series2 = gd.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = gd.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = gd.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = gd.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
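    # normalize the condition: host copy for the pandas path, cudf copy for the gpu path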
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = gd.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = gd.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
with pytest.raises(error):
ps_where.where(ps_condition, other=ps_other, inplace=inplace)
with pytest.raises(error):
gs_where.where(gs_condition, other=gs_other, inplace=inplace)
with pytest.raises(error):
ps_mask.mask(ps_condition, other=ps_other, inplace=inplace)
with pytest.raises(error):
gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = gd.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = gd.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = gd.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
df = gd.from_pandas(data)
with pytest.raises(error):
df = gd.Series(data)
with pytest.raises(error):
df = gd.from_pandas(pdf)
with pytest.raises(error):
df = gd.DataFrame(pdf)
else:
df = gd.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = gd.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = gd.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = gd.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = gd.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = Series(column.as_column(data, nan_as_null=nan_as_null))
got = gd.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas(nullable_pd_dtype=False)
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = Series([1, 2])
psr = gsr.to_pandas(nullable_pd_dtype=False)
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = Series([1, 2])
psr = gsr.to_pandas(nullable_pd_dtype=False)
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
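    # mutate the cast results in the same way and confirm the originals still agree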
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas(nullable_pd_dtype=False)
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = Series([1, 2])
psr = gsr.to_pandas(nullable_pd_dtype=False)
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
with pytest.raises(KeyError):
gsr.astype(dtype={"a": "float"}, copy=copy)
with pytest.raises(KeyError):
psr.astype(dtype={"a": "float"}, copy=copy)
gsr = Series([1, 2])
psr = gsr.to_pandas(nullable_pd_dtype=False)
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
gd.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
gd.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
gd.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
gd.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = gd.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = gd.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = gd.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
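    # with max_info_rows below the row count, the Non-Null Count column is omitted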
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
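    # cupy.isclose on device arrays provides the reference result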
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = gd.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = gd.isclose(
gd.Series(data1), gd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = gd.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = gd.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = gd.isclose(np.array(data1), np.array(data2), rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = gd.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = gd.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = gd.isclose(
gd.Series(data1), gd.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = gd.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = gd.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = gd.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = gd.Series([True] * 6, index=s1.index)
assert_eq(expected, gd.isclose(s1, s2))
s1 = gd.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = gd.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = gd.Series([True, True, True, False, True, True], index=s1.index)
assert_eq(expected, gd.isclose(s1, s2))
s1 = gd.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = gd.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = gd.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, gd.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = gd.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = gd.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"]),
pd.Series(index=[10, 11, 12]),
pd.Series(),
pd.Series([]),
],
)
def test_series_keys(ps):
gds = gd.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = gd.from_pandas(df)
other_gd = gd.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = gd.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = gd.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
],
[
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
],
[pd.DataFrame([]), pd.DataFrame([], index=[100])],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = gd.from_pandas(df)
other_gd = [
gd.from_pandas(o) if isinstance(o, pd.DataFrame) else o for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[[1, 2], [10, 100]],
[[1, 2, 10, 100, 0.1, 0.2, 0.0021]],
[[]],
[[], [], [], []],
[[0.23, 0.00023, -10.00, 100, 200, 1000232, 1232.32323]],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = gd.from_pandas(df)
other_gd = [
gd.from_pandas(o) if isinstance(o, pd.DataFrame) else o for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_error():
df = gd.DataFrame({"a": [1, 2, 3]})
ps = gd.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Can only append a Series if ignore_index=True "
"or if the Series has a name",
):
df.append(ps)
def test_cudf_arrow_array_error():
df = gd.DataFrame({"a": [1, 2, 3]})
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Table via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Table, consider "
"using .to_arrow()",
):
df.__arrow_array__()
sr = gd.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
sr = gd.Series(["a", "b", "c"])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("axis", [0, 1])
def test_dataframe_sample_basic(n, frac, replace, axis):
    # as we currently don't support columns with the same name
if axis == 1 and replace:
return
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5],
"float": [0.05, 0.2, 0.3, 0.2, 0.25],
"int": [1, 3, 5, 4, 2],
},
index=[1, 2, 3, 4, 5],
)
df = DataFrame.from_pandas(pdf)
random_state = 0
kind = None
try:
pout = pdf.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
except BaseException as e:
kind = type(e)
msg = str(e)
if kind is not None:
with pytest.raises(kind, match=msg):
gout = df.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
else:
gout = df.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
if kind is not None:
return
assert pout.shape == gout.shape
@pytest.mark.parametrize("replace", [True, False])
def test_dataframe_reproducibility(replace):
df = DataFrame({"a": cupy.arange(0, 1024)})
expected = df.sample(1024, replace=replace, random_state=1)
out = df.sample(1024, replace=replace, random_state=1)
assert_eq(expected, out)
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
def test_series_sample_basic(n, frac, replace):
psr = pd.Series([1, 2, 3, 4, 5])
sr = Series.from_pandas(psr)
random_state = 0
kind = None
try:
pout = psr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
except BaseException as e:
kind = type(e)
msg = str(e)
if kind is not None:
with pytest.raises(kind, match=msg):
gout = sr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
else:
gout = sr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
if kind is not None:
return
assert pout.shape == gout.shape
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_empty(df):
pdf = df
gdf = gd.from_pandas(pdf)
assert_eq(pdf.empty, gdf.empty)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_size(df):
pdf = df
gdf = gd.from_pandas(pdf)
assert_eq(pdf.size, gdf.size)
@pytest.mark.parametrize(
"ps",
[
pd.Series(),
pd.Series(index=[100, 10, 1, 0]),
pd.Series([]),
pd.Series(["a", "b", "c", "d"]),
pd.Series(["a", "b", "c", "d"], index=[0, 1, 10, 11]),
],
)
def test_series_empty(ps):
ps = ps
gs = gd.from_pandas(ps)
assert_eq(ps.empty, gs.empty)
@pytest.mark.parametrize(
"data",
[
[],
[1],
{"a": [10, 11, 12]},
{
"a": [10, 11, 12],
"another column name": [12, 22, 34],
"xyz": [0, 10, 11],
},
],
)
@pytest.mark.parametrize("columns", [["a"], ["another column name"], None])
def test_dataframe_init_with_columns(data, columns):
pdf = pd.DataFrame(data, columns=columns)
gdf = gd.DataFrame(data, columns=columns)
assert_eq(
pdf,
gdf,
check_index_type=False if len(pdf.index) == 0 else True,
check_dtype=False if pdf.empty and len(pdf.columns) else True,
)
@pytest.mark.parametrize(
"data, ignore_dtype",
[
([pd.Series([1, 2, 3])], False),
([pd.Series(index=[1, 2, 3])], False),
([pd.Series(name="empty series name")], False),
([pd.Series([1]), pd.Series([]), pd.Series([3])], False),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([]),
pd.Series([3], name="series that is named"),
],
False,
),
([pd.Series([1, 2, 3], name="hi")] * 10, False),
([pd.Series([1, 2, 3], name=None, index=[10, 11, 12])] * 10, False),
(
[
pd.Series([1, 2, 3], name=None, index=[10, 11, 12]),
pd.Series([1, 2, 30], name=None, index=[13, 144, 15]),
],
True,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([]),
pd.Series(index=[10, 11, 12]),
],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], name="abc"),
pd.Series(index=[10, 11, 12]),
],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([1, -100, 200, -399, 400], name="abc"),
pd.Series([111, 222, 333], index=[10, 11, 12]),
],
False,
),
],
)
@pytest.mark.parametrize(
"columns", [None, ["0"], [0], ["abc"], [144, 13], [2, 1, 0]]
)
def test_dataframe_init_from_series_list(data, ignore_dtype, columns):
gd_data = [gd.from_pandas(obj) for obj in data]
expected = pd.DataFrame(data, columns=columns)
actual = gd.DataFrame(gd_data, columns=columns)
if ignore_dtype:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data, ignore_dtype, index",
[
([pd.Series([1, 2, 3])], False, ["a", "b", "c"]),
([pd.Series(index=[1, 2, 3])], False, ["a", "b"]),
([pd.Series(name="empty series name")], False, ["index1"]),
(
[pd.Series([1]), pd.Series([]), pd.Series([3])],
False,
["0", "2", "1"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([]),
pd.Series([3], name="series that is named"),
],
False,
["_", "+", "*"],
),
([pd.Series([1, 2, 3], name="hi")] * 10, False, ["mean"] * 10),
(
[pd.Series([1, 2, 3], name=None, index=[10, 11, 12])] * 10,
False,
["abc"] * 10,
),
(
[
pd.Series([1, 2, 3], name=None, index=[10, 11, 12]),
pd.Series([1, 2, 30], name=None, index=[13, 144, 15]),
],
True,
["set_index_a", "set_index_b"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([]),
pd.Series(index=[10, 11, 12]),
],
False,
["a", "b", "c"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], name="abc"),
pd.Series(index=[10, 11, 12]),
],
False,
["a", "v", "z"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([1, -100, 200, -399, 400], name="abc"),
pd.Series([111, 222, 333], index=[10, 11, 12]),
],
False,
["a", "v", "z"],
),
],
)
@pytest.mark.parametrize(
"columns", [None, ["0"], [0], ["abc"], [144, 13], [2, 1, 0]]
)
def test_dataframe_init_from_series_list_with_index(
data, ignore_dtype, index, columns
):
gd_data = [gd.from_pandas(obj) for obj in data]
expected = pd.DataFrame(data, columns=columns, index=index)
actual = gd.DataFrame(gd_data, columns=columns, index=index)
if ignore_dtype:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data, index",
[
([pd.Series([1, 2]), pd.Series([1, 2])], ["a", "b", "c"]),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([]),
pd.Series([3], name="series that is named"),
],
["_", "+"],
),
([pd.Series([1, 2, 3], name="hi")] * 10, ["mean"] * 9),
],
)
def test_dataframe_init_from_series_list_with_index_error(data, index):
gd_data = [gd.from_pandas(obj) for obj in data]
try:
pd.DataFrame(data, index=index)
except Exception as e:
with pytest.raises(type(e), match=re.escape(str(e))):
gd.DataFrame(gd_data, index=index)
else:
raise AssertionError(
"expected pd.DataFrame to because of index mismatch "
"with data dimensions"
)
@pytest.mark.parametrize(
"data",
[
[pd.Series([1, 2, 3], index=["a", "a", "a"])],
[pd.Series([1, 2, 3], index=["a", "a", "a"])] * 4,
[
pd.Series([1, 2, 3], index=["a", "b", "a"]),
pd.Series([1, 2, 3], index=["b", "b", "a"]),
],
[
pd.Series([1, 2, 3], index=["a", "b", "z"]),
pd.Series([1, 2, 3], index=["u", "b", "a"]),
pd.Series([1, 2, 3], index=["u", "b", "u"]),
],
],
)
def test_dataframe_init_from_series_list_duplicate_index_error(data):
gd_data = [gd.from_pandas(obj) for obj in data]
try:
pd.DataFrame(data)
except Exception as e:
with pytest.raises(ValueError, match=re.escape(str(e))):
gd.DataFrame(gd_data)
else:
raise AssertionError(
"expected pd.DataFrame to because of duplicates in index"
)
def test_dataframe_iterrows_itertuples():
df = gd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
with pytest.raises(
TypeError,
match=re.escape(
"cuDF does not support iteration of DataFrame "
"via itertuples. Consider using "
"`.to_pandas().itertuples()` "
"if you wish to iterate over namedtuples."
),
):
df.itertuples()
with pytest.raises(
TypeError,
match=re.escape(
"cuDF does not support iteration of DataFrame "
"via iterrows. Consider using "
"`.to_pandas().iterrows()` "
"if you wish to iterate over each row."
),
):
df.iterrows()
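def test_dataframe_iteration_workaround():
    # A hedged sketch of the workaround suggested by the error messages above:
    # move the data to pandas on the host first and iterate there. The test
    # name and values are illustrative only.
    df = gd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    rows = list(df.to_pandas().itertuples())
    assert len(rows) == 3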
| 28.634743
| 79
| 0.547287
|
4a05ec7469ebc8bab4e637bf0a4cd9f50029cd13
| 269
|
py
|
Python
|
os_scrapy_rq_crawler/__init__.py
|
zanachka/os-scrapy-rq-crawler
|
f861f5633b90ce4e2c3a5488a14ed77b1c40d7af
|
[
"MIT"
] | 3
|
2020-05-13T12:55:03.000Z
|
2021-03-15T10:09:12.000Z
|
os_scrapy_rq_crawler/__init__.py
|
zanachka/os-scrapy-rq-crawler
|
f861f5633b90ce4e2c3a5488a14ed77b1c40d7af
|
[
"MIT"
] | null | null | null |
os_scrapy_rq_crawler/__init__.py
|
zanachka/os-scrapy-rq-crawler
|
f861f5633b90ce4e2c3a5488a14ed77b1c40d7af
|
[
"MIT"
] | 1
|
2020-10-29T18:15:25.000Z
|
2020-10-29T18:15:25.000Z
|
from .asyncio.rq import AsyncRequestQueue
from .upstream import MultiUpstreamRequestQueue
from .utils import HTTPRequestQueue, MemoryRequestQueue
__all__ = [
"MemoryRequestQueue",
"AsyncRequestQueue",
"HTTPRequestQueue",
"MultiUpstreamRequestQueue",
]
| 24.454545
| 55
| 0.784387
|
4a05ecb0dfd4dab6e0d0f2ea82a8fe1f5f558c6f
| 758
|
py
|
Python
|
Setup.py
|
linard-y/pySolanio
|
3bbb689ee7b9ccfca6ea52f99f84263dd03b1045
|
[
"MIT"
] | null | null | null |
Setup.py
|
linard-y/pySolanio
|
3bbb689ee7b9ccfca6ea52f99f84263dd03b1045
|
[
"MIT"
] | null | null | null |
Setup.py
|
linard-y/pySolanio
|
3bbb689ee7b9ccfca6ea52f99f84263dd03b1045
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
import pySolanio
setup(
name='pySolanio',
version=pySolanio.__version__,
packages=find_packages(),
author="Linard Y.",
author_email="yldev@free.fr",
description="Solution analysis input/output",
long_description=open('README.md').read(),
include_package_data=True,
url='http://github.com/linard-y/pySolanio',
classifiers=[
"Programming Language :: Python",
"Development Status :: 1 - Planning",
"License :: OSI Approved :: MIT License",
"Natural Language :: French",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Topic :: Chemical Data Manipulation",
]
)
| 21.055556
| 49
| 0.627968
|
4a05ed6ae9de38ebef33b780fbce89622cc49d69
| 6,797
|
py
|
Python
|
salt/modules/win_service.py
|
moniker-dns/salt
|
0e1cd880dc7831b9f937a213dd90cc32e2a09884
|
[
"Apache-2.0"
] | 1
|
2016-03-13T09:05:15.000Z
|
2016-03-13T09:05:15.000Z
|
salt/modules/win_service.py
|
moniker-dns/salt
|
0e1cd880dc7831b9f937a213dd90cc32e2a09884
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/win_service.py
|
moniker-dns/salt
|
0e1cd880dc7831b9f937a213dd90cc32e2a09884
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Windows Service module.
'''
# Import python libs
import time
import salt.utils
def __virtual__():
'''
Only works on Windows systems
'''
if salt.utils.is_windows():
return 'service'
return False
def get_enabled():
'''
Return the enabled services
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
ret = set()
services = []
cmd = 'sc query type= service state= all'
lines = __salt__['cmd.run'](cmd).splitlines()
for line in lines:
if 'SERVICE_NAME:' in line:
comps = line.split(':', 1)
if not len(comps) > 1:
continue
services.append(comps[1].strip())
for service in services:
cmd2 = 'sc qc "{0}"'.format(service)
lines = __salt__['cmd.run'](cmd2).splitlines()
for line in lines:
if 'AUTO_START' in line:
ret.add(service)
return sorted(ret)
def get_disabled():
'''
Return the disabled services
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
'''
ret = set()
services = []
cmd = 'sc query type= service state= all'
lines = __salt__['cmd.run'](cmd).splitlines()
for line in lines:
if 'SERVICE_NAME:' in line:
comps = line.split(':', 1)
if not len(comps) > 1:
continue
services.append(comps[1].strip())
for service in services:
cmd2 = 'sc qc "{0}"'.format(service)
lines = __salt__['cmd.run'](cmd2).splitlines()
for line in lines:
if 'DEMAND_START' in line:
ret.add(service)
elif 'DISABLED' in line:
ret.add(service)
return sorted(ret)
def available(name):
'''
Returns ``True`` if the specified service is available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.available <service name>
'''
return name in get_all()
def get_all():
'''
Return all installed services
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
return sorted(get_enabled() + get_disabled())
def get_service_name(*args):
'''
The Display Name is what is displayed in Windows when services.msc is
executed. Each Display Name has an associated Service Name which is the
actual name of the service. This function allows you to discover the
Service Name by returning a dictionary of Display Names and Service Names,
or filter by adding arguments of Display Names.
If no args are passed, return a dict of all services where the keys are the
service Display Names and the values are the Service Names.
If arguments are passed, create a dict of Display Names and Service Names
CLI Example:
.. code-block:: bash
salt '*' service.get_service_name
salt '*' service.get_service_name 'Google Update Service (gupdate)' 'DHCP Client'
'''
ret = {}
services = []
display_names = []
cmd = 'sc query type= service state= all'
lines = __salt__['cmd.run'](cmd).splitlines()
for line in lines:
if 'SERVICE_NAME:' in line:
comps = line.split(':', 1)
if not len(comps) > 1:
continue
services.append(comps[1].strip())
if 'DISPLAY_NAME:' in line:
comps = line.split(':', 1)
if not len(comps) > 1:
continue
display_names.append(comps[1].strip())
if len(services) == len(display_names):
service_dict = dict(zip(display_names, services))
else:
return 'Service Names and Display Names mismatch'
if len(args) == 0:
return service_dict
for arg in args:
if arg in service_dict:
ret[arg] = service_dict[arg]
return ret
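# Illustrative (hypothetical) return value of get_service_name() with no
# arguments -- a dict mapping Display Names to Service Names, for example:
#     {'DHCP Client': 'Dhcp', 'Windows Update': 'wuauserv'}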
def start(name):
'''
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = 'sc start "{0}"'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
'''
cmd = 'sc stop "{0}"'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
'''
stop(name)
for idx in xrange(5):
if status(name):
time.sleep(2)
continue
return start(name)
return False
def status(name, sig=None):
'''
    Return True if the named service is running (or a stop is pending),
    otherwise False. The ``sig`` argument is accepted for interface
    compatibility and is not used on Windows.
CLI Example:
.. code-block:: bash
salt '*' service.status <service name> [service signature]
'''
cmd = 'sc query "{0}"'.format(name)
statuses = __salt__['cmd.run'](cmd).splitlines()
for line in statuses:
if 'RUNNING' in line:
return True
elif 'STOP_PENDING' in line:
return True
return False
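# For reference, status() keys off state lines in ``sc query`` output such as
# the following (illustrative; exact spacing and state codes vary):
#     STATE              : 4  RUNNING
#     STATE              : 3  STOP_PENDING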
def getsid(name):
'''
Return the sid for this windows service
CLI Example:
.. code-block:: bash
salt '*' service.getsid <service name>
'''
cmd = 'sc showsid "{0}"'.format(name)
lines = __salt__['cmd.run'](cmd).splitlines()
for line in lines:
if 'SERVICE SID:' in line:
comps = line.split(':', 1)
            if len(comps[1]) > 1:
return comps[1].strip()
else:
return None
def enable(name, **kwargs):
'''
Enable the named service to start at boot
CLI Example:
.. code-block:: bash
salt '*' service.enable <service name>
'''
cmd = 'sc config "{0}" start= auto'.format(name)
return not __salt__['cmd.retcode'](cmd)
def disable(name, **kwargs):
'''
Disable the named service to start at boot
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
'''
cmd = 'sc config "{0}" start= demand'.format(name)
return not __salt__['cmd.retcode'](cmd)
def enabled(name):
'''
Check to see if the named service is enabled to start on boot
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
'''
return name in get_enabled()
def disabled(name):
'''
Check to see if the named service is disabled to start on boot
CLI Example:
.. code-block:: bash
salt '*' service.disabled <service name>
'''
return name in get_disabled()
| 22.885522
| 89
| 0.576872
|
4a05edb6c0bca8e76cd51c3be4bdfd6230c29623
| 39,981
|
py
|
Python
|
week/migrations/0001_initial.py
|
uno-isqa-8950/fitgirl-inc
|
2656e7340e85ab8cbeb0de19dcbc81030b9b5b81
|
[
"MIT"
] | 6
|
2018-09-11T15:30:10.000Z
|
2020-01-14T17:29:07.000Z
|
week/migrations/0001_initial.py
|
uno-isqa-8950/fitgirl-inc
|
2656e7340e85ab8cbeb0de19dcbc81030b9b5b81
|
[
"MIT"
] | 722
|
2018-08-29T17:27:38.000Z
|
2022-03-11T23:28:33.000Z
|
week/migrations/0001_initial.py
|
uno-isqa-8950/fitgirl-inc
|
2656e7340e85ab8cbeb0de19dcbc81030b9b5b81
|
[
"MIT"
] | 13
|
2018-08-29T07:42:01.000Z
|
2019-04-21T22:34:30.000Z
|
# Generated by Django 2.2.4 on 2020-05-03 17:02
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailimages', '0001_squashed_0021'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
('account', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AnnouncementAlertPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('announcements', wagtail.core.fields.RichTextField(blank=True)),
('display_warning', models.BooleanField(default=False, help_text='Check this box to display warning announcement on the website')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='Disclaimerlink',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('disclaimer', wagtail.core.fields.RichTextField(blank=True)),
('disclaimer2', models.CharField(blank=True, max_length=10000)),
('disclaimer3', models.CharField(blank=True, max_length=10000)),
('disclaimer4', models.CharField(blank=True, max_length=10000)),
('disclaimer5', models.CharField(blank=True, max_length=10000)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='DisclaimerPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('disclaimer', wagtail.core.fields.RichTextField(blank=True)),
('disclaimer2', models.CharField(blank=True, max_length=10000)),
('disclaimer3', models.CharField(blank=True, max_length=10000)),
('disclaimer4', models.CharField(blank=True, max_length=10000)),
('disclaimer5', models.CharField(blank=True, max_length=10000)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EmailTemplates',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('subject_for_inactivity', models.CharField(blank=True, max_length=10000)),
('subject_for_group', models.CharField(blank=True, max_length=10000)),
('group_message', wagtail.core.fields.RichTextField(blank=True)),
('inactivity_message', wagtail.core.fields.RichTextField(blank=True)),
('subject_for_rewards_notification', models.CharField(blank=True, max_length=10000)),
('rewards_message', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ExtrasIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('description', wagtail.core.fields.RichTextField(blank=True)),
('additional', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='KindnessCardPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('KindnessCard', models.CharField(blank=True, max_length=10000)),
('KindnessCard2', models.CharField(blank=True, max_length=10000)),
('KindnessCard3', models.CharField(blank=True, max_length=10000)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PreassessmentPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('thank_you_text', wagtail.core.fields.RichTextField(blank=True)),
('points_for_this_activity', models.IntegerField(blank=True, default=0)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='Print',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PrivacyPolicyLink',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('policy', wagtail.core.fields.RichTextField(blank=True)),
('policy2', models.CharField(blank=True, max_length=10000)),
('attach_file', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ProgramIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('description', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='QuestionPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('thank_you_text', wagtail.core.fields.RichTextField(blank=True)),
('points_for_this_activity', models.IntegerField(blank=True, default=0)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='QuestionPageText',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('description', wagtail.core.fields.RichTextField(blank=True)),
('thank_you_text', wagtail.core.fields.RichTextField(blank=True)),
('points_for_this_activity', models.IntegerField(blank=True, default=0)),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='RewardsIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('description', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SidebarContentPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('subject_for_announcement1', models.CharField(blank=True, max_length=10000)),
('message_announcement1', wagtail.core.fields.RichTextField(blank=True)),
('subject_for_announcement2', models.CharField(blank=True, max_length=10000)),
('message_announcement2', wagtail.core.fields.RichTextField(blank=True)),
('subject_for_announcement3', models.CharField(blank=True, max_length=10000)),
('message_announcement3', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SidebarImagePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('subject_for_advertisement', models.CharField(blank=True, max_length=10000)),
('advertisement_image', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StatementsPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('mission', models.CharField(blank=True, max_length=200)),
('vision', models.CharField(blank=True, max_length=200)),
('values', models.CharField(blank=True, max_length=200)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='WeekPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('description', wagtail.core.fields.RichTextField(blank=True)),
('start_date', models.DateTimeField(blank=True, null=True, verbose_name='Start Date')),
('end_date', models.DateTimeField(blank=True, null=True, verbose_name='End Date')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='welcomepage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('text1', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='UserActivity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Activity', models.CharField(max_length=50)),
('Week', models.IntegerField(null=True)),
('DayOfWeek', models.CharField(max_length=10)),
('points_earned', models.IntegerField(null=True)),
('creation_date', models.DateField()),
('updated_date', models.DateField()),
('program', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Program')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Sensitive',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('description', wagtail.core.fields.RichTextField(blank=True)),
('body', wagtail.core.fields.RichTextField(blank=True)),
('age_group_content', models.IntegerField(blank=True, default=0, verbose_name='Enter the age group to show the content to: 1 for 6 or younger; 2 for ages 7-10; 3 for ages 11-13; 4 for ages 14-16; 5 for 17+')),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='QuestionTextFormField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255, verbose_name='label')),
('field_type', models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time'), ('hidden', 'Hidden field')], max_length=16, verbose_name='field type')),
('required', models.BooleanField(default=True, verbose_name='required')),
('choices', models.TextField(blank=True, help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', verbose_name='choices')),
('default_value', models.CharField(blank=True, help_text='Default value. Comma separated values supported for checkboxes.', max_length=255, verbose_name='default value')),
('help_text', models.CharField(blank=True, max_length=255, verbose_name='help text')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='form_field', to='week.QuestionPageText')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='QuestionFormField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255, verbose_name='label')),
('field_type', models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time'), ('hidden', 'Hidden field')], max_length=16, verbose_name='field type')),
('required', models.BooleanField(default=True, verbose_name='required')),
('choices', models.TextField(blank=True, help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', verbose_name='choices')),
('default_value', models.CharField(blank=True, help_text='Default value. Comma separated values supported for checkboxes.', max_length=255, verbose_name='default value')),
('help_text', models.CharField(blank=True, max_length=255, verbose_name='help text')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='form_fields', to='week.QuestionPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='PreassessmentFormField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255, verbose_name='label')),
('field_type', models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time'), ('hidden', 'Hidden field')], max_length=16, verbose_name='field type')),
('required', models.BooleanField(default=True, verbose_name='required')),
('choices', models.TextField(blank=True, help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', verbose_name='choices')),
('default_value', models.CharField(blank=True, help_text='Default value. Comma separated values supported for checkboxes.', max_length=255, verbose_name='default value')),
('help_text', models.CharField(blank=True, max_length=255, verbose_name='help text')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='form_fields', to='week.PreassessmentPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='PostassessmentPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('thank_you_text', wagtail.core.fields.RichTextField(blank=True)),
('points_for_this_activity', models.IntegerField(blank=True, default=0)),
('start_date', models.DateTimeField(blank=True, null=True, verbose_name='Start Date')),
('end_date', models.DateTimeField(blank=True, null=True, verbose_name='End Date')),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PostassessmentFormField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255, verbose_name='label')),
('field_type', models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time'), ('hidden', 'Hidden field')], max_length=16, verbose_name='field type')),
('required', models.BooleanField(default=True, verbose_name='required')),
('choices', models.TextField(blank=True, help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', verbose_name='choices')),
('default_value', models.CharField(blank=True, help_text='Default value. Comma separated values supported for checkboxes.', max_length=255, verbose_name='default value')),
('help_text', models.CharField(blank=True, max_length=255, verbose_name='help text')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='form_fields', to='week.PostassessmentPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='PhysicalPostPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('strength', wagtail.core.fields.RichTextField(blank=True)),
('agility', wagtail.core.fields.RichTextField(blank=True)),
('flexibility', wagtail.core.fields.RichTextField(blank=True)),
('points_for_this_activity', models.IntegerField(blank=True, default=0)),
('timer_for_this_activity', models.CharField(blank=True, default=datetime.time(0, 11), help_text='Time format should be in MM:SS', max_length=20)),
('thank_you_text', wagtail.core.fields.RichTextField(blank=True)),
('start_date', models.DateTimeField(blank=True, null=True, verbose_name='Start Date')),
('end_date', models.DateTimeField(blank=True, null=True, verbose_name='End Date')),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PhysicalFormField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255, verbose_name='label')),
('field_type', models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time'), ('hidden', 'Hidden field')], max_length=16, verbose_name='field type')),
('required', models.BooleanField(default=True, verbose_name='required')),
('choices', models.TextField(blank=True, help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', verbose_name='choices')),
('default_value', models.CharField(blank=True, help_text='Default value. Comma separated values supported for checkboxes.', max_length=255, verbose_name='default value')),
('help_text', models.CharField(blank=True, max_length=255, verbose_name='help text')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='form_fields', to='week.PhysicalPostPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='NutritionPostPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.core.fields.RichTextField(blank=True)),
('morecontent', wagtail.core.fields.RichTextField(blank=True)),
('facts', wagtail.core.fields.RichTextField(blank=True)),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='NutritionGame',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.core.fields.RichTextField(blank=True)),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ModelIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('description', wagtail.core.fields.RichTextField(blank=True)),
('intro', models.CharField(blank=True, max_length=255)),
('ad_url', models.URLField(blank=True)),
('vertical_url', models.URLField(blank=True)),
('announcements', wagtail.core.fields.RichTextField(blank=True)),
('ad_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('vertical_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='MentalPostPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.core.fields.RichTextField(blank=True)),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='MentalArtPostPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.core.fields.RichTextField(blank=True)),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='LandingIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('description', wagtail.core.fields.RichTextField(blank=True)),
('additional', wagtail.core.fields.RichTextField(blank=True)),
('physical', wagtail.core.fields.RichTextField(blank=True)),
('nutritional', wagtail.core.fields.RichTextField(blank=True)),
('mental', wagtail.core.fields.RichTextField(blank=True)),
('relational', wagtail.core.fields.RichTextField(blank=True)),
('physicaldesc', wagtail.core.fields.RichTextField(blank=True)),
('nutritionaldesc', wagtail.core.fields.RichTextField(blank=True)),
('mentaldesc', wagtail.core.fields.RichTextField(blank=True)),
('relationaldesc', wagtail.core.fields.RichTextField(blank=True)),
('card_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('card_imageb', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('card_imagec', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('card_imaged', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='FunStuffGames',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('callout_intro', wagtail.core.fields.RichTextField(blank=True)),
('callout_message', wagtail.core.fields.RichTextField(blank=True)),
('body', wagtail.core.fields.RichTextField(blank=True)),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='FunStuffArt',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('callout_intro', wagtail.core.fields.RichTextField(blank=True)),
('callout_message', wagtail.core.fields.RichTextField(blank=True)),
('body', wagtail.core.fields.RichTextField(blank=True)),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='Fact',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('description', wagtail.core.fields.RichTextField(blank=True)),
('body', wagtail.core.fields.RichTextField(blank=True)),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BonusQuestionPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('thank_you_text', wagtail.core.fields.RichTextField(blank=True)),
('points_for_this_activity', models.IntegerField(blank=True, default=0)),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BonusQuestionFormField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255, verbose_name='label')),
('field_type', models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time'), ('hidden', 'Hidden field')], max_length=16, verbose_name='field type')),
('required', models.BooleanField(default=True, verbose_name='required')),
('choices', models.TextField(blank=True, help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', verbose_name='choices')),
('default_value', models.CharField(blank=True, help_text='Default value. Comma separated values supported for checkboxes.', max_length=255, verbose_name='default value')),
('help_text', models.CharField(blank=True, max_length=255, verbose_name='help text')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='form_fields', to='week.BonusQuestionPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='addstudentoftheweek',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('student_name', models.CharField(blank=True, max_length=200)),
('my_favorite_color', models.CharField(blank=True, max_length=200)),
('my_favorite_healthy_snack', models.CharField(blank=True, max_length=200)),
('my_favorite_sport', models.CharField(blank=True, max_length=200)),
('my_favorite_athlete', models.CharField(blank=True, max_length=200)),
('my_friends_would_describe_me_as', models.CharField(blank=True, max_length=300)),
('am_good_at', models.CharField(blank=True, max_length=300)),
('display_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='AboutUsIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('description', wagtail.core.fields.RichTextField(blank=True)),
('ad_url', models.URLField(blank=True)),
('ad_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='CustomFormSubmission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('form_data', models.TextField()),
('submit_time', models.DateTimeField(auto_now_add=True, verbose_name='submit time')),
('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Page')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='question_form', to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('page', 'user')},
},
),
]
| 63.461905
| 449
| 0.602586
|
4a05edbb09bf4e5a77939b0ec51765a45de8b739
| 2,529
|
py
|
Python
|
tensorflow_probability/python/bijectors/softsign.py
|
m-colombo/probability
|
74037f90010c08e17a567c281ff3f70f4157364a
|
[
"Apache-2.0"
] | 1
|
2018-09-15T05:02:30.000Z
|
2018-09-15T05:02:30.000Z
|
tensorflow_probability/python/bijectors/softsign.py
|
snehil03july/probability
|
5f576230f1e261a823e20a49c442ff38c8f381d3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/bijectors/softsign.py
|
snehil03july/probability
|
5f576230f1e261a823e20a49c442ff38c8f381d3
|
[
"Apache-2.0"
] | 1
|
2019-10-13T19:52:57.000Z
|
2019-10-13T19:52:57.000Z
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Softsign bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.distributions import bijector
__all__ = [
"Softsign",
]
class Softsign(bijector.Bijector):
"""Bijector which computes `Y = g(X) = X / (1 + |X|)`.
The softsign `Bijector` has the following two useful properties:
* The domain is all real numbers
* `softsign(x) approx sgn(x)`, for large `|x|`.
#### Examples
```python
# Create the Y = softsign(X) transform.
softsign = Softsign()
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
x / (1 + abs(x)) == softsign.forward(x)
x / (1 - abs(x)) == softsign.inverse(x)
```
"""
def __init__(self, validate_args=False, name="softsign"):
super(Softsign, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
return x / (1. + tf.abs(x))
def _inverse(self, y):
y = self._maybe_assert_valid_y(y)
return y / (1. - tf.abs(y))
def _forward_log_det_jacobian(self, x):
return -2. * tf.log1p(tf.abs(x))
def _inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
return -2. * tf.log1p(-tf.abs(y))
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_valid = [
tf.assert_greater(
y,
tf.cast(-1., dtype=y.dtype.base_dtype),
message="Inverse transformation input must be greater than -1."),
tf.assert_less(
y,
tf.cast(1., dtype=y.dtype.base_dtype),
message="Inverse transformation input must be less than 1.")
]
return control_flow_ops.with_dependencies(is_valid, y)
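if __name__ == "__main__":
  # A minimal usage sketch, assuming a TF 1.x graph/session runtime (as
  # implied by the imports above). Values are illustrative.
  softsign_bijector = Softsign(validate_args=True)
  x = tf.constant([-3., 0., 3.])
  y = softsign_bijector.forward(x)            # approximately [-0.75, 0., 0.75]
  x_recovered = softsign_bijector.inverse(y)  # approximately [-3., 0., 3.]
  with tf.Session() as sess:
    print(sess.run([y, x_recovered]))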
| 28.738636
| 78
| 0.642151
|
4a05ee9ddfedf06c42ef4d8cb985400a44472736
| 4,757
|
py
|
Python
|
tests/util.py
|
questdb/pykit
|
c8aac35ab57b88d422f40126380f11f1d1e2d143
|
[
"Apache-2.0"
] | 7
|
2021-10-04T12:28:17.000Z
|
2022-01-13T16:41:47.000Z
|
tests/util.py
|
questdb/pykit
|
c8aac35ab57b88d422f40126380f11f1d1e2d143
|
[
"Apache-2.0"
] | null | null | null |
tests/util.py
|
questdb/pykit
|
c8aac35ab57b88d422f40126380f11f1d1e2d143
|
[
"Apache-2.0"
] | 1
|
2022-02-10T05:53:24.000Z
|
2022-02-10T05:53:24.000Z
|
#
# ___ _ ____ ____
# / _ \ _ _ ___ ___| |_| _ \| __ )
# | | | | | | |/ _ \/ __| __| | | | _ \
# | |_| | |_| | __/\__ \ |_| |_| | |_) |
# \__\_\\__,_|\___||___/\__|____/|____/
#
# Copyright (c) 2014-2019 Appsicle
# Copyright (c) 2019-2020 QuestDB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import os
import psycopg2
import numpy as np
import mmap
from pathlib import Path
import pandas as pd
from pandas.core.internals import (BlockManager, make_block)
from pandas.core.indexes.base import Index
from pykit import (
select_all,
with_cursor,
Cursor
)
from pykit.internal import (
MemSnapshot,
mem_snapshot,
mem_snapshot_diff
)
class BaseTestTest(unittest.TestCase):
def assert_table_content(self, table_name: str, expected: str) -> None:
results = ''
for row in select_all(table_name):
results += str(row) + os.linesep
self.assertEqual(expected, results)
def take_mem_snapshot(self):
return mem_snapshot()
def report_mem_snapshot_diff(self, snapshot_start: MemSnapshot, heading: str = None) -> MemSnapshot:
snapshot_now = mem_snapshot()
if heading is not None:
print(heading)
print(mem_snapshot_diff(snapshot_start, snapshot_now))
return snapshot_now
def create_rnd_table(self, table_name: str, num_rows: int = 10):
def _create_rnd_table(stmt_cursor: Cursor) -> None:
statement = f'CREATE TABLE {table_name} AS('
statement += 'SELECT'
statement += ' rnd_long(0, 9223372036854775807, 1) long, '
statement += ' rnd_int(0, 2147483647, 1) int, '
statement += ' rnd_boolean() boolean, '
statement += " rnd_date(to_date('1978', 'yyyy'), to_date('2021', 'yyyy'), 1) date, "
statement += ' rnd_double(1) double, '
statement += " rnd_timestamp(to_timestamp('1978', 'yyyy'), to_timestamp('2021', 'yyyy'), 0) ts "
statement += 'FROM'
statement += f' long_sequence({num_rows})'
statement += ') timestamp(ts) partition by YEAR;'
stmt_cursor.execute(statement)
try:
with_cursor(_create_rnd_table)
except (Exception, psycopg2.Error) as create_error:
print(f'Error while creating rnd table [{table_name}]: {create_error}')
def dataframe(file_path: Path,
col_name: str,
row_count: int,
dtype: np.dtype,
storage: int,
na_value: int,
cls=None):
return pd.DataFrame(
data=BlockManagerUnconsolidated(
blocks=(make_block(
values=mmap_column(file_path, row_count, dtype, storage, na_value, cls),
placement=(0,)
),),
axes=[
Index(data=[col_name]),
pd.RangeIndex(name='Idx', start=0, stop=row_count, step=1)
],
verify_integrity=False),
copy=False)
def mmap_column(file_path: Path, nrows: int, dtype: np.dtype, storage: int, na_value: int, cls=None):
with open(file_path, mode='rb') as col_file:
col_mmap = mmap.mmap(
col_file.fileno(),
length=nrows * storage,
flags=mmap.MAP_SHARED,
access=mmap.ACCESS_READ,
offset=0)
column_array = np.ndarray(shape=(nrows,), dtype=dtype, buffer=col_mmap, offset=0, order='C')
column_array.flags['WRITEABLE'] = False
column_array.flags['ALIGNED'] = True
mask_array = np.zeros((nrows,), dtype=bool, order='C')
for null_idx in np.where(column_array == na_value):
mask_array[null_idx] = True
np.save(Path('resources') / 'null_mask.npy', mask_array, allow_pickle=False)
constructor = pd.arrays.IntegerArray if cls is None else cls
return constructor(column_array, mask_array)
class BlockManagerUnconsolidated(BlockManager):
def __init__(self, *args, **kwargs):
BlockManager.__init__(self, *args, **kwargs)
self._is_consolidated = False
self._known_consolidated = True
def _consolidate_inplace(self):
pass
def _consolidate(self):
return self.blocks
| 35.237037
| 109
| 0.622031
|
4a05ef7b8a5251310d48536430191c6293e4faa7
| 4,577
|
py
|
Python
|
gn/compile_sksl_tests.py
|
borodust/skia
|
bbbf1a7f50a303bd76163793bd5968c72f5f4432
|
[
"BSD-3-Clause"
] | null | null | null |
gn/compile_sksl_tests.py
|
borodust/skia
|
bbbf1a7f50a303bd76163793bd5968c72f5f4432
|
[
"BSD-3-Clause"
] | null | null | null |
gn/compile_sksl_tests.py
|
borodust/skia
|
bbbf1a7f50a303bd76163793bd5968c72f5f4432
|
[
"BSD-3-Clause"
] | 1
|
2021-06-06T21:31:52.000Z
|
2021-06-06T21:31:52.000Z
|
#!/usr/bin/env python
#
# Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shlex
import subprocess
import sys
import tempfile
batchCompile = True
skslc = sys.argv[1]
lang = sys.argv[2]
settings = sys.argv[3]
with open(sys.argv[4], 'r') as reader:
inputs = shlex.split(reader.read())
def pairwise(iterable):
# Iterate over an array pairwise (two elements at a time).
a = iter(iterable)
return zip(a, a)
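# e.g. pairwise(["a.sksl", "golden/", "b.sksl", "golden/"]) yields
# ("a.sksl", "golden/") then ("b.sksl", "golden/") -- a sketch with made-up names.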
def executeWorklist(input, worklist):
# Invoke skslc, passing in the worklist.
worklist.close()
try:
output = subprocess.check_output([skslc, worklist.name], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
if err.returncode != 1:
print("### " + input + " skslc error:\n")
print("\n".join(err.output.splitlines()))
sys.exit(err.returncode)
pass # Compile errors (exit code 1) are expected and normal in test code
# Delete the worklist file now that execution is complete.
os.remove(worklist.name)
def makeEmptyFile(path):
try:
open(path, 'wb').close()
except OSError:
pass
def extensionForSpirvAsm(ext):
return ext if (ext == '.frag' or ext == '.vert' or ext == '.geom') else '.frag'
if settings != "--settings" and settings != "--nosettings":
sys.exit("### Expected --settings or --nosettings, got " + settings)
targets = []
worklist = tempfile.NamedTemporaryFile(suffix='.worklist', delete=False)
# The `inputs` array pairs off input files with their matching output directory, e.g.:
# //skia/tests/sksl/shared/test.sksl
# //skia/tests/sksl/shared/golden/
# //skia/tests/sksl/intrinsics/abs.sksl
# //skia/tests/sksl/intrinsics/golden/
# ... (etc) ...
# Here we loop over these inputs and convert them into a worklist file for skslc.
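# Each worklist entry written below is three lines (source path, output path, settings
# flag) followed by a blank separator line, e.g. for --glsl (hypothetical paths):
#   tests/sksl/shared/test.sksl
#   tests/sksl/shared/golden/test.glsl
#   --settings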
for input, targetDir in pairwise(inputs):
noExt, ext = os.path.splitext(input)
head, tail = os.path.split(noExt)
if not os.path.isdir(targetDir):
os.mkdir(targetDir)
target = os.path.join(targetDir, tail)
if settings == "--nosettings":
target += "StandaloneSettings"
targets.append(target)
if lang == "--fp":
worklist.write(input + "\n")
worklist.write(target + ".cpp\n")
worklist.write(settings + "\n\n")
worklist.write(input + "\n")
worklist.write(target + ".h\n")
worklist.write(settings + "\n\n")
elif lang == "--glsl":
worklist.write(input + "\n")
worklist.write(target + ".glsl\n")
worklist.write(settings + "\n\n")
elif lang == "--metal":
worklist.write(input + "\n")
worklist.write(target + ".metal\n")
worklist.write(settings + "\n\n")
elif lang == "--spirv":
worklist.write(input + "\n")
worklist.write(target + ".asm" + extensionForSpirvAsm(ext) + "\n")
worklist.write(settings + "\n\n")
elif lang == "--skvm":
worklist.write(input + "\n")
worklist.write(target + ".skvm\n")
worklist.write(settings + "\n\n")
elif lang == "--stage":
worklist.write(input + "\n")
worklist.write(target + ".stage\n")
worklist.write(settings + "\n\n")
else:
sys.exit("### Expected one of: --fp --glsl --metal --spirv --skvm --stage, got " + lang)
# Compile items one at a time.
if not batchCompile:
executeWorklist(input, worklist)
worklist = tempfile.NamedTemporaryFile(suffix='.worklist', delete=False)
# Compile everything all in one go.
if batchCompile:
executeWorklist("", worklist)
else:
worklist.close()
os.remove(worklist.name)
# A special case cleanup pass, just for CPP and H files: if either one of these files starts with
# `### Compilation failed`, its sibling should be replaced by an empty file. This improves clarity
# during code review; a failure on either file means that success on the sibling is irrelevant.
if lang == "--fp":
for target in targets:
cppFile = open(target + '.cpp', 'r')
hFile = open(target + '.h', 'r')
if cppFile.readline().startswith("### Compilation failed"):
# The CPP had a compilation failure. Clear the header file.
hFile.close()
makeEmptyFile(target + '.h')
elif hFile.readline().startswith("### Compilation failed"):
# The header had a compilation failure. Clear the CPP file.
cppFile.close()
makeEmptyFile(target + '.cpp')
| 34.413534
| 98
| 0.626393
|
4a05f012a3c41cbd4cedd528f8e799f99b9f7fed
| 35,825
|
py
|
Python
|
src/auto-posture-evaluator/testers/vpc_tester.py
|
antstackio/coralogix-aws-serverless
|
00d49bc8bb22d2ec466d68a3d77d967d6ead5fa8
|
[
"Apache-2.0"
] | null | null | null |
src/auto-posture-evaluator/testers/vpc_tester.py
|
antstackio/coralogix-aws-serverless
|
00d49bc8bb22d2ec466d68a3d77d967d6ead5fa8
|
[
"Apache-2.0"
] | null | null | null |
src/auto-posture-evaluator/testers/vpc_tester.py
|
antstackio/coralogix-aws-serverless
|
00d49bc8bb22d2ec466d68a3d77d967d6ead5fa8
|
[
"Apache-2.0"
] | null | null | null |
import time
import boto3
import interfaces
import json
def _format_string_to_json(text):
return json.loads(text)
class Tester(interfaces.TesterInterface):
def __init__(self):
self.aws_vpc_client = boto3.client('ec2')
self.cache = {}
self.user_id = boto3.client('sts').get_caller_identity().get('UserId')
self.account_arn = boto3.client('sts').get_caller_identity().get('Arn')
self.account_id = boto3.client('sts').get_caller_identity().get('Account')
self.all_vpc_details = self._get_all_vpc()
self.all_ami_images = self._get_all_ami_images()
def _get_all_vpc(self):
response = self.aws_vpc_client.describe_vpcs()
vpc_detail = []
# If you have the required permissions, the error response is DryRunOperation .
# Otherwise, it is UnauthorizedOperation .
if response and 'Vpcs' in response and response['Vpcs']:
vpc_detail.extend(response['Vpcs'])
while 'NextToken' in response and response['NextToken']:
response = self.aws_vpc_client.describe_vpcs(NextToken=response['NextToken'])
if response and 'Vpcs' in response and response['Vpcs']:
vpc_detail.extend(response['Vpcs'])
return vpc_detail
def _get_all_ami_images(self):
response_of_describe_images = self.aws_vpc_client.describe_images()
if response_of_describe_images and 'Images' in response_of_describe_images and response_of_describe_images[
'Images']:
return response_of_describe_images['Images']
return []
def declare_tested_service(self) -> str:
return 'vpc'
def declare_tested_provider(self) -> str:
return 'aws'
def run_tests(self) -> list:
return self.detect_vpc_logging_status() + \
self.detect_vpc_endpoint_publicly_accessibility() + \
self.detect_network_acl_restriction_status() + \
self.detect_vpc_network_acl_inbound_and_outbound_traffic_rules() + \
self.detect_default_nacl_used() + \
self.detect_vpc_dnc_resolution_enabled() + \
self.detect_vpc_unrestricted_icmp_access() + \
self.detect_securitygroup_inbound_rule_without_specified_protocol() + \
self.detect_public_and_not_encrypted_ami_images() + \
self.detect_vpc_peering_connection() + \
self.detect_unrestricted_ssh_access() + \
self.detect_vpc_unrestricted_smb_access() + \
self.detect_vpc_unrestricted_dns_tcp_access() + \
self.detect_vpc_unrestricted_vnc_server_access() + \
self.detect_vpc_unrestricted_dns_udp_access() + \
self.detect_vpc_unrestricted_ftp_access() + \
self.detect_vpc_unrestricted_cifs_access() + \
self.detect_vpc_default_security_groups_in_use() + \
self.detect_vpc_unrestricted_telnet_access() + \
self.detect_vpc_unrestricted_rdp_access() + \
self.detect_vpc_unrestricted_ftp_data_access() + \
self.detect_vpc_unrestricted_smtp_access() + \
self.detect_vpc_unrestricted_sql_server_tcp_access() + \
self.detect_vpc_unrestricted_sql_server_udp_access() + \
self.detect_vpc_unrestricted_net_bios_access() + \
self.detect_vpc_unrestricted_mysql_access() + \
self.detect_vpc_unrestricted_postgre_sql_access() + \
self.detect_vpc_unrestricted_vnc_listener_access() + \
self.detect_vpc_eip_in_use() + \
self.detect_vpc_security_group_per_vpc_limit()
def _append_vpc_test_result(self, vpc_detail, test_name, issue_status):
return {
"user": self.user_id,
"account_arn": self.account_arn,
"account": self.account_id,
"timestamp": time.time(),
"item": vpc_detail['VpcId'],
"item_type": "vpc",
"test_name": test_name,
"test_result": issue_status
}
def _append_vpc_acm_test_result(self, acm_image_id, test_name, issue_status):
return {
"user": self.user_id,
"account_arn": self.account_arn,
"account": self.account_id,
"timestamp": time.time(),
"item": acm_image_id,
"item_type": "vpc",
"test_name": test_name,
"test_result": issue_status
}
    def _check_logging_status(self, test_name):
logging_result = []
for vpc_detail in self.all_vpc_details:
result = self.aws_vpc_client.describe_flow_logs(Filters=[
{
'Name': 'resource-id',
'Values': [vpc_detail['VpcId']]
},
])
if result and result['FlowLogs']:
logging_result.append(self._append_vpc_test_result(vpc_detail, test_name, 'no_issue_found'))
else:
logging_result.append(self._append_vpc_test_result(vpc_detail, test_name, 'issue_found'))
return logging_result
def _check_vpc_public_accessibility(self, test_name):
vpc_public_accessible = []
for vpc_detail in self.all_vpc_details:
result = self.aws_vpc_client.describe_vpc_endpoints(Filters=[
{
'Name': 'vpc-id',
'Values': [vpc_detail['VpcId']]
},
])
if result and 'VpcEndpoints' in result and result['VpcEndpoints']:
for vpc_end_point_data in result['VpcEndpoints']:
if 'PolicyDocument' in vpc_end_point_data and vpc_end_point_data['PolicyDocument']:
policy_document_json_data = _format_string_to_json(vpc_end_point_data['PolicyDocument'])
if 'Statement' in policy_document_json_data:
issue_found = False
for statement_dict in policy_document_json_data['Statement']:
if 'Principal' in statement_dict and statement_dict[
'Principal'] == '*' or 'Principal' in statement_dict and 'AWS' in statement_dict[
'Principal'] and statement_dict['Principal']['AWS'] == '*':
issue_found = True
break
if issue_found:
vpc_public_accessible.append(
self._append_vpc_test_result(vpc_detail, test_name, 'issue_found'))
else:
vpc_public_accessible.append(
self._append_vpc_test_result(vpc_detail, test_name, 'no_issue_found'))
else:
vpc_public_accessible.append(
self._append_vpc_test_result(vpc_detail, test_name, 'no_issue_found'))
return vpc_public_accessible
def _check_ingress_administration_ports_range_for_network_acls_inbound_rule(self, test_name):
ingress_traffic_test_result = []
for vpc_detail in self.all_vpc_details:
vpc_id = vpc_detail['VpcId']
response = self.aws_vpc_client.describe_network_acls(Filters=[{
'Name': 'vpc-id',
'Values': [vpc_id]
}, ])
if response and 'NetworkAcls' in response and len(response['NetworkAcls']):
for acl in response['NetworkAcls']:
issue_found = False
for network_acl_rules in acl['Entries']:
if 'Egress' in network_acl_rules and not network_acl_rules['Egress'] and network_acl_rules[
'RuleAction'].lower() == 'allow':
if 'PortRange' not in network_acl_rules:
issue_found = True
break
# elif 'PortRange' in network_acl_rules and network_acl_rules['PortRange'] == []:
if issue_found:
ingress_traffic_test_result.append(
self._append_vpc_test_result(vpc_detail, test_name, 'issue_found'))
else:
ingress_traffic_test_result.append(
self._append_vpc_test_result(vpc_detail, test_name, 'no_issue_found'))
else:
ingress_traffic_test_result.append(
self._append_vpc_test_result(vpc_detail, test_name, 'no_issue_found'))
return ingress_traffic_test_result
def _check_securitygroup_inbound_rule_without_specified_protocol(self, test_name):
security_groups_inbound_rule_result = []
for vpc_detail in self.all_vpc_details:
security_groups_response = self.aws_vpc_client.describe_security_groups(Filters=[{
'Name': 'vpc-id',
'Values': [vpc_detail['VpcId']]
}])
issue_found = False
if security_groups_response and 'SecurityGroups' in security_groups_response and security_groups_response[
'SecurityGroups']:
for security_groups_dict in security_groups_response['SecurityGroups']:
if issue_found:
break
if 'IpPermissions' in security_groups_dict and security_groups_dict['IpPermissions']:
for ip_permission_dict in security_groups_dict['IpPermissions']:
if 'IpProtocol' in ip_permission_dict and str(
ip_permission_dict['IpProtocol']) == '-1' or str(
ip_permission_dict['IpProtocol']).lower() == 'all':
issue_found = True
break
else:
issue_found = True
break
else:
issue_found = True
if issue_found:
security_groups_inbound_rule_result.append(
self._append_vpc_test_result(vpc_detail, test_name, 'issue_found'))
else:
security_groups_inbound_rule_result.append(
self._append_vpc_test_result(vpc_detail, test_name, 'no_issue_found'))
return security_groups_inbound_rule_result
def _check_default_nacl_used(self, test_name):
default_nacl_used_result = []
for vpc_detail in self.all_vpc_details:
network_acls_response = self.aws_vpc_client.describe_network_acls(Filters=[{
'Name': 'vpc-id',
'Values': [vpc_detail['VpcId']]
}])
issue_found = False
if 'NetworkAcls' in network_acls_response and network_acls_response['NetworkAcls']:
for network_acls_dict in network_acls_response['NetworkAcls']:
if 'IsDefault' in network_acls_dict and network_acls_dict['IsDefault']:
issue_found = True
break
else:
issue_found = True
if issue_found:
default_nacl_used_result.append(self._append_vpc_test_result(vpc_detail, test_name, 'issue_found'))
else:
default_nacl_used_result.append(self._append_vpc_test_result(vpc_detail, test_name, 'no_issue_found'))
return default_nacl_used_result
def _check_vpc_dns_resolution_enabled(self, test_name):
vpc_dns_resolution_result = []
for vpc_detail in self.all_vpc_details:
dns_support_response = self.aws_vpc_client.describe_vpc_attribute(
Attribute='enableDnsSupport',
VpcId=vpc_detail['VpcId']
)
if 'EnableDnsSupport' in dns_support_response and dns_support_response['EnableDnsSupport'] and 'Value' in \
dns_support_response['EnableDnsSupport'] and dns_support_response['EnableDnsSupport']['Value']:
vpc_dns_resolution_result.append(self._append_vpc_test_result(vpc_detail, test_name, 'no_issue_found'))
else:
vpc_dns_resolution_result.append(self._append_vpc_test_result(vpc_detail, test_name, 'issue_found'))
return vpc_dns_resolution_result
def _check_vpc_unrestricted_icmp_access(self, test_name):
vpc_unrestricted_icmp_access = []
for vpc_detail in self.all_vpc_details:
issue_found = False
security_groups_response = self.aws_vpc_client.describe_security_groups(Filters=[{
'Name': 'vpc-id',
'Values': [vpc_detail['VpcId']]
}, {
'Name': 'ip-permission.protocol',
'Values': ['icmp']
}, {
'Name': 'ip-permission.cidr',
'Values': ['0.0.0.0/0']
}
, {
'Name': 'ip-permission.ipv6-cidr',
'Values': ['::/0']
}])
if security_groups_response and 'SecurityGroups' in security_groups_response and security_groups_response[
'SecurityGroups']:
for security_groups_response_dict in security_groups_response['SecurityGroups']:
if 'IpPermissions' in security_groups_response_dict and security_groups_response_dict[
'IpPermissions']:
issue_found = True
break
if issue_found:
vpc_unrestricted_icmp_access.append(self._append_vpc_test_result(vpc_detail, test_name, 'issue_found'))
else:
vpc_unrestricted_icmp_access.append(
self._append_vpc_test_result(vpc_detail, test_name, 'no_issue_found'))
return vpc_unrestricted_icmp_access
    def _check_inbound_traffic(self):
inbound_traffic_result = []
for vpc_detail in self.all_vpc_details:
vpc_id = vpc_detail['VpcId']
response = self.aws_vpc_client.describe_network_acls(Filters=[{
'Name': 'vpc-id',
'Values': [vpc_id]
}, ])
inoutbound_allow_rule_number = []
inoutbound_deny_rule_number = []
inoutbound_allow_rule_asterisk = ''
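            # NACL entries are evaluated in ascending rule-number order, so the check below
            # flags a VPC when its lowest-numbered ALLOW rule for 0.0.0.0/0 comes at or
            # before the lowest-numbered DENY rule, or when the catch-all '*' rule allows.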
if response and 'NetworkAcls' in response:
issue_found = False
for network_acl_rules_dict in response['NetworkAcls']:
if issue_found:
break
for network_acl_rules in network_acl_rules_dict['Entries']:
if 'Egress' in network_acl_rules and not network_acl_rules['Egress'] and network_acl_rules[
'CidrBlock'] == '0.0.0.0/0':
if network_acl_rules[
'RuleAction'].lower() == 'allow':
if str(network_acl_rules['RuleNumber']) == '*':
inoutbound_allow_rule_asterisk = '*'
else:
inoutbound_allow_rule_number.append(network_acl_rules['RuleNumber'])
else:
inoutbound_deny_rule_number.append(network_acl_rules['RuleNumber'])
inoutbound_allow_rule_number.sort()
inoutbound_deny_rule_number.sort()
if len(inoutbound_allow_rule_number) and len(
inoutbound_deny_rule_number) and inoutbound_allow_rule_number[0] <= \
inoutbound_deny_rule_number[
0] or inoutbound_allow_rule_asterisk == '*':
issue_found = True
if issue_found:
inbound_traffic_result.append(
self._append_vpc_test_result(vpc_detail, 'network_acl_inbound_traffic_is_restricted',
'issue_found'))
else:
inbound_traffic_result.append(
self._append_vpc_test_result(vpc_detail, 'network_acl_inbound_traffic_is_restricted',
'no_issue_found'))
return inbound_traffic_result
def _check_outbound_traffic(self):
outbound_traffic_result = []
for vpc_detail in self.all_vpc_details:
vpc_id = vpc_detail['VpcId']
response = self.aws_vpc_client.describe_network_acls(Filters=[{
'Name': 'vpc-id',
'Values': [vpc_id]
}, ])
outbound_allow_rule_number = []
outbound_deny_rule_number = []
outbound_allow_rule_asterisk = ''
if response and 'NetworkAcls' in response:
issue_found = False
for network_acl_rules_dict in response['NetworkAcls']:
if issue_found:
break
for network_acl_rules in network_acl_rules_dict['Entries']:
if 'Egress' in network_acl_rules and network_acl_rules['Egress'] and network_acl_rules[
'CidrBlock'] == '0.0.0.0/0':
if network_acl_rules[
'RuleAction'].lower() == 'allow':
if str(network_acl_rules['RuleNumber']) == '*':
outbound_allow_rule_asterisk = '*'
else:
outbound_allow_rule_number.append(network_acl_rules['RuleNumber'])
else:
outbound_deny_rule_number.append(network_acl_rules['RuleNumber'])
outbound_allow_rule_number.sort()
outbound_deny_rule_number.sort()
if len(outbound_allow_rule_number) and len(
outbound_deny_rule_number) and outbound_allow_rule_number[0] <= outbound_deny_rule_number[
0] or outbound_allow_rule_asterisk == '*':
issue_found = True
if issue_found:
outbound_traffic_result.append(
self._append_vpc_test_result(vpc_detail, 'network_acl_outbound_traffic_is_restricted',
'issue_found'))
else:
outbound_traffic_result.append(
self._append_vpc_test_result(vpc_detail, 'network_acl_outbound_traffic_is_restricted',
'no_issue_found'))
return outbound_traffic_result
def _all_check_unrestricted_ssh_access(self, response):
issue_list = []
if 'SecurityGroups' in response and response['SecurityGroups']:
for security_group_dict in response['SecurityGroups']:
for ip_permission_dict in security_group_dict['IpPermissions']:
if ip_permission_dict['IpProtocol'] in ['tcp', '6', '-1'] and (
('FromPort' in ip_permission_dict and ip_permission_dict[
'FromPort'] <= 22 and 'ToPort' in ip_permission_dict and ip_permission_dict[
'ToPort'] >= 22) or (
str('FromPort' in ip_permission_dict and ip_permission_dict[
'FromPort']) == '-1' and str(
'ToPort' in ip_permission_dict and ip_permission_dict['ToPort']) == '-1')):
issue_list.append(security_group_dict['GroupId'])
break
return issue_list
def _find_all_vpc_unrestricted_protocol_access(self, response, port_number_list, protocol_list):
issue_list = []
if 'SecurityGroups' in response and response['SecurityGroups']:
for security_group_dict in response['SecurityGroups']:
for ip_permission_dict in security_group_dict['IpPermissions']:
for port_number in port_number_list:
if ip_permission_dict['IpProtocol'] in protocol_list and (
('FromPort' in ip_permission_dict and ip_permission_dict[
'FromPort'] <= port_number and 'ToPort' in ip_permission_dict and
ip_permission_dict[
'ToPort'] >= port_number) or (
str('FromPort' in ip_permission_dict and ip_permission_dict[
'FromPort']) == '-1' and str(
'ToPort' in ip_permission_dict and ip_permission_dict['ToPort']) == '-1')):
issue_list.append(security_group_dict['GroupId'])
if issue_list:
break
return issue_list
def _find_security_group_response(self, port_number, protocol_list, test_name):
result = []
for vpc_detail in self.all_vpc_details:
ipv4_response = self.aws_vpc_client.describe_security_groups(
Filters=[
{
'Name': 'vpc-id',
'Values': [vpc_detail['VpcId']]
},
{'Name': "ip-permission.cidr", "Values": ['0.0.0.0/0']}
])
issue_list = self._find_all_vpc_unrestricted_protocol_access(ipv4_response, port_number, protocol_list)
ipv6_response = self.aws_vpc_client.describe_security_groups(
Filters=[
{
'Name': 'vpc-id',
'Values': [vpc_detail['VpcId']]
},
{'Name': 'ip-permission.ipv6-cidr', 'Values': ['::/0']}
])
issue_list.extend(
self._find_all_vpc_unrestricted_protocol_access(ipv6_response, port_number, protocol_list))
issue_found = list(dict.fromkeys(issue_list))
if issue_found:
vpc_id = vpc_detail['VpcId']
for data in issue_found:
vpc_detail['VpcId'] = vpc_id + '@@' + data
result.append(
self._append_vpc_test_result(vpc_detail, test_name, 'issue_found'))
vpc_detail['VpcId'] = vpc_id
else:
result.append(
self._append_vpc_test_result(vpc_detail, test_name, 'no_issue_found'))
return result
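    # Sketch: the detect_* wrappers below pass a port list, the protocol identifiers EC2
    # may report ('tcp'/'udp', their IANA numbers '6'/'17', or '-1' for all protocols),
    # and a test name, e.g. (hypothetical check, not one of the wrappers in this tester):
    # self._find_security_group_response([3306], ['tcp', '6', '-1'], 'vpc_unrestricted_mysql_3306_access')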
def _append_epi_test_result(self, eip_detail, test_name, issue_status):
return {
"user": self.user_id,
"account_arn": self.account_arn,
"account": self.account_id,
"timestamp": time.time(),
"item": eip_detail['AllocationId'],
"item_type": "vpc_elastic_ip",
"test_name": test_name,
"test_result": issue_status
}
def detect_vpc_logging_status(self) -> list:
return self._check_logging_status('vpc_flow_logging_is_enabled_in_all_vpcs')
def detect_vpc_endpoint_publicly_accessibility(self):
return self._check_vpc_public_accessibility('vpc_endpoint_publicly_accessible')
def detect_vpc_network_acl_inbound_and_outbound_traffic_rules(self):
return self._check_outbound_traffic() + self._check_inbound_traffic()
def detect_network_acl_restriction_status(self):
return self._check_ingress_administration_ports_range_for_network_acls_inbound_rule(
'network_acl_do_not_allow_ingress_from_0.0.0.0/0_to_remote_server_administration_ports')
def detect_securitygroup_inbound_rule_without_specified_protocol(self):
return self._check_securitygroup_inbound_rule_without_specified_protocol(
'vpc_securitygroup_inbound_rule_without_specified_protocol')
def detect_default_nacl_used(self):
return self._check_default_nacl_used('vpc_default_nacl_used')
def detect_vpc_dnc_resolution_enabled(self):
        return self._check_vpc_dns_resolution_enabled('vpc_dns_resolution_enabled')
def detect_vpc_unrestricted_icmp_access(self):
return self._check_vpc_unrestricted_icmp_access('vpc_unrestricted_icmp_access')
def detect_public_and_not_encrypted_ami_images(self):
public_ami_result = []
encrypted_ami_result = []
for ami_images_dict in self.all_ami_images:
issue_found_on_public_acm = False
issue_found_on_encrypted_acm = False
if 'Public' in ami_images_dict and ami_images_dict['Public']:
issue_found_on_public_acm = True
if 'BlockDeviceMappings' in ami_images_dict and ami_images_dict['BlockDeviceMappings']:
for blocked_device_dict in ami_images_dict['BlockDeviceMappings']:
if 'Ebs' in blocked_device_dict and blocked_device_dict['Ebs'] and 'Encrypted' in \
blocked_device_dict['Ebs'] and blocked_device_dict['Ebs']['Encrypted']:
issue_found_on_encrypted_acm = True
break
else:
issue_found_on_encrypted_acm = True
if issue_found_on_public_acm:
public_ami_result.append(
self._append_vpc_acm_test_result(ami_images_dict['ImageId'], 'public_ami_detected', 'issue_found'))
else:
public_ami_result.append(
self._append_vpc_acm_test_result(ami_images_dict['ImageId'], 'public_ami_detected',
'no_issue_found'))
if issue_found_on_encrypted_acm:
encrypted_ami_result.append(self._append_vpc_acm_test_result(ami_images_dict['ImageId'],
'source_ami_snapshot_is_not_encrypted',
'issue_found'))
else:
encrypted_ami_result.append(self._append_vpc_acm_test_result(ami_images_dict['ImageId'],
'source_ami_snapshot_is_not_encrypted',
'no_issue_found'))
return public_ami_result + encrypted_ami_result
def detect_unrestricted_ssh_access(self):
unrestricted_ssh_access_result = []
for vpc_detail in self.all_vpc_details:
vpc_id = vpc_detail['VpcId']
response = self.aws_vpc_client.describe_security_groups(
Filters=[
{
'Name': 'vpc-id',
'Values': [vpc_id]
},
{'Name': "ip-permission.cidr", "Values": ['0.0.0.0/0']}
]
)
issue_found = self._all_check_unrestricted_ssh_access(response)
ipv6_response = self.aws_vpc_client.describe_security_groups(
Filters=[
{
'Name': 'vpc-id',
'Values': [vpc_id]
},
{'Name': 'ip-permission.ipv6-cidr', 'Values': ['::/0']}
])
issue_found.extend(self._all_check_unrestricted_ssh_access(ipv6_response))
issue_found = list(dict.fromkeys(issue_found))
if issue_found:
vpc_id = vpc_detail['VpcId']
for data in issue_found:
vpc_detail['VpcId'] = vpc_id + '@@' + data
unrestricted_ssh_access_result.append(
self._append_vpc_test_result(vpc_detail, 'unrestricted_ssh_access', 'issue_found'))
else:
unrestricted_ssh_access_result.append(
self._append_vpc_test_result(vpc_detail, 'unrestricted_ssh_access', 'no_issue_found'))
return unrestricted_ssh_access_result
def detect_vpc_peering_connection(self):
vpc_peering_connection_status = []
for vpc_detail in self.all_vpc_details:
issue_found = []
vpc_peering_connection_response = self.aws_vpc_client.describe_vpc_peering_connections(Filters=[
{
'Name': 'requester-vpc-info.vpc-id',
'Values': [vpc_detail['VpcId']]
}
])
if vpc_peering_connection_response and 'VpcPeeringConnections' in vpc_peering_connection_response and \
vpc_peering_connection_response['VpcPeeringConnections']:
for vpc_peering_connection_dict in vpc_peering_connection_response['VpcPeeringConnections']:
if vpc_peering_connection_dict['AccepterVpcInfo']['OwnerId'] != \
vpc_peering_connection_dict['RequesterVpcInfo']['OwnerId']:
issue_found.append(vpc_peering_connection_dict['VpcPeeringConnectionId'])
if issue_found:
vpc_id = vpc_detail['VpcId']
for data in issue_found:
vpc_detail['VpcId'] = vpc_id + '@@' + data
vpc_peering_connection_status.append(
self._append_vpc_test_result(vpc_detail, 'unauthorized_vpc_peering', 'issue_found'))
else:
vpc_peering_connection_status.append(
self._append_vpc_test_result(vpc_detail, 'unauthorized_vpc_peering', 'no_issue_found'))
return vpc_peering_connection_status
def detect_vpc_unrestricted_smb_access(self):
return self._find_security_group_response([445], ['tcp', '6', '-1'], 'vpc_unrestricted_smb_access')
def detect_vpc_unrestricted_dns_tcp_access(self):
return self._find_security_group_response([53], ['tcp', '6', '-1'], 'vpc_unrestricted_dns_tcp_access')
def detect_vpc_unrestricted_vnc_server_access(self):
return self._find_security_group_response([5800, 5900], ['tcp', '6', '-1'],
'vpc_unrestricted_vnc_server_access')
def detect_vpc_unrestricted_dns_udp_access(self):
return self._find_security_group_response([53], ['udp', '17', '-1'], 'vpc_unrestricted_dns_udp_access')
def detect_vpc_unrestricted_ftp_access(self):
return self._find_security_group_response([21], ['tcp', '6', '-1'], 'vpc_unrestricted_ftp_access')
def detect_vpc_unrestricted_cifs_access(self):
return self._find_security_group_response([445], ['udp', '17', '-1'], 'vpc_unrestricted_cifs_access')
def detect_vpc_default_security_groups_in_use(self):
result = []
test_name = 'vpc_default_security_groups_in_use'
all_ec2_instance = []
ec2_response = self.aws_vpc_client.describe_instances()
if ec2_response and 'Reservations' in ec2_response and ec2_response['Reservations']:
for reservations_dict in ec2_response['Reservations']:
if 'Instances' in reservations_dict and reservations_dict['Instances']:
all_ec2_instance.extend(reservations_dict['Instances'])
for ec2_instance_dict in all_ec2_instance:
response = self.aws_vpc_client.describe_security_groups(
Filters=[
{
'Name': 'group-id',
'Values': [security_group_dict['GroupId'] for security_group_dict in
ec2_instance_dict['SecurityGroups']]
}
])
if 'SecurityGroups' in response and response['SecurityGroups']:
for security_groups_dict in response['SecurityGroups']:
if 'GroupName' in security_groups_dict and security_groups_dict['GroupName'] == 'default':
ec2_instance_dict['VpcId'] = security_groups_dict['VpcId'] + '@@' + security_groups_dict[
'GroupId']
result.append(self._append_vpc_test_result(ec2_instance_dict, test_name, 'issue_found'))
ec2_instance_dict['VpcId'] = security_groups_dict['VpcId']
else:
result.append(self._append_vpc_test_result(ec2_instance_dict, test_name, 'no_issue_found'))
return result
def detect_vpc_unrestricted_telnet_access(self):
return self._find_security_group_response([23], ['tcp', '6', '-1'], 'vpc_unrestricted_telnet_access')
def detect_vpc_unrestricted_rdp_access(self):
return self._find_security_group_response([3389], ['tcp', '6', '-1'], 'vpc_unrestricted_rdp_access')
def detect_vpc_unrestricted_ftp_data_access(self):
return self._find_security_group_response([20], ['tcp', '6', '-1'], 'vpc_unrestricted_ftp_data_access')
def detect_vpc_unrestricted_smtp_access(self):
return self._find_security_group_response([25], ['tcp', '6', '-1'], 'vpc_unrestricted_smtp_access')
def detect_vpc_unrestricted_sql_server_tcp_access(self):
return self._find_security_group_response([1433], ['tcp', '6', '-1'], 'vpc_unrestricted_sql_server_tcp_access')
def detect_vpc_unrestricted_sql_server_udp_access(self):
return self._find_security_group_response([1433], ['udp', '17', '-1'], 'vpc_unrestricted_sql_server_udp_access')
def detect_vpc_unrestricted_net_bios_access(self):
return self._find_security_group_response([137, 138], ['udp', '17', '-1'], 'vpc_unrestricted_net_bios_access')
def detect_vpc_unrestricted_mysql_access(self):
return self._find_security_group_response([4333], ['tcp', '6', '-1'], 'vpc_unrestricted_mysql_access')
def detect_vpc_unrestricted_postgre_sql_access(self):
return self._find_security_group_response([5432], ['tcp', '6', '-1'], 'vpc_unrestricted_postgre_sql_access')
def detect_vpc_unrestricted_vnc_listener_access(self):
return self._find_security_group_response([5500], ['tcp', '6', '-1'], 'vpc_unrestricted_vnc_listener_access')
def detect_vpc_eip_in_use(self):
result = []
test_name = 'vpc_ip_address_is_attached_to_a_host_or_eni'
response = self.aws_vpc_client.describe_addresses()
for address_dict in response['Addresses']:
if 'AssociationId' not in address_dict or (
'AssociationId' in address_dict and not address_dict['AssociationId']):
result.append(self._append_epi_test_result(address_dict, test_name, 'issue_found'))
else:
result.append(self._append_epi_test_result(address_dict, test_name, 'no_issue_found'))
return result
def detect_vpc_security_group_per_vpc_limit(self):
result = []
        test_name = 'vpc_security_group_per_vpc_limit'
for vpc_detail in self.all_vpc_details:
security_groups_response = self.aws_vpc_client.describe_security_groups(
Filters=[{'Name': 'vpc-id', 'Values': [vpc_detail['VpcId']]}], MaxResults=451)
count = len(security_groups_response['SecurityGroups'])
if count >= 450:
result.append(self._append_vpc_test_result(vpc_detail, test_name, 'issue_found'))
else:
result.append(self._append_vpc_test_result(vpc_detail, test_name, 'no_issue_found'))
return result
| 52.223032
| 120
| 0.588193
|
4a05f13866efb2e81f652438cd856acad4f0f1e0
| 1,866
|
py
|
Python
|
react_new.py
|
megapod/create-react-web-app-from-cache
|
41457e29a4d6acdfc8cae408917de589e35d8145
|
[
"MIT"
] | null | null | null |
react_new.py
|
megapod/create-react-web-app-from-cache
|
41457e29a4d6acdfc8cae408917de589e35d8145
|
[
"MIT"
] | null | null | null |
react_new.py
|
megapod/create-react-web-app-from-cache
|
41457e29a4d6acdfc8cae408917de589e35d8145
|
[
"MIT"
] | null | null | null |
import subprocess
import os
import sys
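# Usage sketch: `python react_new.py my-app` extracts the cached template archive into
# ./my-app (the name "my-app" is just an example); running the script with no arguments
# instead refreshes react_project_template.7z via `npm update`.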
# if a project name was passed as a cli argument.
if len(sys.argv) > 1:
# extract the template to the new project directory
    try:
        subprocess.run(
            ["7z", "x", "react_project_template.7z", "-o./" + sys.argv[1]],
            stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
    except OSError:
        print("Failed!")
    else:
        print("Successfully created the new project " + sys.argv[1])
else:
print('Updating...')
# update procedure from here on
# extract the template
try:
subprocess.run(
["7z", "x", "react_project_template.7z", "-o./react_project_template"],
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
except OSError:
print("Couldn't find: react_project_template.7z")
else:
print("Successfully extracted the react_project_template.7z file")
# get the path
dir_path = os.path.dirname(os.path.realpath(__file__))
extracted_path = dir_path + "/react_project_template/"
# update the template
try:
# Change Working Directory and update
subprocess.run(
["npm", "update"], cwd=extracted_path)
except OSError:
print("Couldn't update node packages")
else:
print("Successfully updated template")
# repackage the template
try:
subprocess.run(
["7z", "u", "react_project_template", "./react_project_template/."],
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
except OSError:
print("Couldn't Overwrite: react_project_template.7z")
else:
print("Successfully updated react_project_template.7z")
# cleanup
try:
        # delete the intermediate folder
subprocess.run(
["rm", "-rf", "./react_project_template"])
except OSError:
        print("Couldn't delete intermediate folder: react_project_template")
else:
        print("Successfully deleted the intermediate folder")
| 29.15625
| 79
| 0.691854
|
4a05f1a51a603bb275f191c85025605717ffa6fa
| 10,531
|
py
|
Python
|
bin/wgc/quota.py
|
orionzhou/nf
|
cb56f9b17c7c9352e34a3d89c8c38b777085a057
|
[
"MIT"
] | null | null | null |
bin/wgc/quota.py
|
orionzhou/nf
|
cb56f9b17c7c9352e34a3d89c8c38b777085a057
|
[
"MIT"
] | null | null | null |
bin/wgc/quota.py
|
orionzhou/nf
|
cb56f9b17c7c9352e34a3d89c8c38b777085a057
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Quota synteny alignment (QUOTA-ALIGN)
%prog [options] anchorsfile --qbed=qbedfile --sbed=sbedfile
This python program does the following:
1. merge 2D-overlapping blocks (now skipped, but existed in original version)
2. build constraints that represent 1D-overlap among blocks
3. feed the data into the linear programming solver
The algorithm is described in Tang et al. BMC Bioinformatics 2011.
"Screening synteny blocks in pairwise genome comparisons through integer
programming."
"""
from __future__ import print_function
import os.path as op
import sys
from six.moves import StringIO
import logging
from jcvi.utils.range import range_overlap
from jcvi.utils.grouper import Grouper
from jcvi.algorithms.lpsolve import GLPKSolver, SCIPSolver
from jcvi.compara.synteny import AnchorFile, _score, check_beds
from jcvi.formats.base import must_open
from jcvi.apps.base import OptionParser
def get_1D_overlap(eclusters, depth=1):
"""
Find blocks that are 1D overlapping,
returns cliques of block ids that are in conflict
"""
overlap_set = set()
active = set()
ends = []
for i, (chr, left, right) in enumerate(eclusters):
ends.append((chr, left, 0, i)) # 0/1 for left/right-ness
ends.append((chr, right, 1, i))
ends.sort()
chr_last = ""
for chr, pos, left_right, i in ends:
if chr != chr_last:
active.clear()
if left_right == 0:
active.add(i)
else:
active.remove(i)
if len(active) > depth:
overlap_set.add(tuple(sorted(active)))
chr_last = chr
return overlap_set
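# Sketch with made-up intervals: the first two blocks overlap on chr1, the third is clear,
# so with depth=1 only the conflicting pair is reported:
# get_1D_overlap([("chr1", 0, 10), ("chr1", 5, 15), ("chr1", 20, 30)], depth=1)
# -> {(0, 1)}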
def get_2D_overlap(chain, eclusters):
"""
    Implements a sweep line algorithm that has a better running time than the naive O(n^2):
assume block has x_ends, and y_ends for the bounds
1. sort x_ends, and take a sweep line to scan the x_ends
2. if left end, test y-axis intersection of current block with `active` set;
also put this block in the `active` set
3. if right end, remove block from the `active` set
"""
mergeables = Grouper()
active = set()
x_ends = []
for i, (range_x, range_y, score) in enumerate(eclusters):
chr, left, right = range_x
x_ends.append((chr, left, 0, i)) # 0/1 for left/right-ness
x_ends.append((chr, right, 1, i))
x_ends.sort()
chr_last = ""
for chr, pos, left_right, i in x_ends:
if chr != chr_last:
active.clear()
if left_right == 0:
active.add(i)
for x in active:
# check y-overlap
if range_overlap(eclusters[x][1], eclusters[i][1]):
mergeables.join(x, i)
else: # right end
active.remove(i)
chr_last = chr
return mergeables
def make_range(clusters, extend=0):
"""
Convert to interval ends from a list of anchors
extend modifies the xmax, ymax boundary of the box,
which can be positive or negative
very useful when we want to make the range as fuzzy as we specify
"""
eclusters = []
for cluster in clusters:
xlist, ylist, scores = zip(*cluster)
score = _score(cluster)
xchr, xmin = min(xlist)
xchr, xmax = max(xlist)
ychr, ymin = min(ylist)
ychr, ymax = max(ylist)
# allow fuzziness to the boundary
xmax += extend
ymax += extend
        # because extend can be negative, don't let the max drop below the min
if xmax < xmin:
xmin, xmax = xmax, xmin
if ymax < ymin:
ymin, ymax = ymax, ymin
eclusters.append(((xchr, xmin, xmax), (ychr, ymin, ymax), score))
return eclusters
def get_constraints(clusters, quota=(1, 1), Nmax=0):
"""
    Check pairwise cluster comparisons; if two clusters overlap, mark the edge as a conflict
"""
qa, qb = quota
eclusters = make_range(clusters, extend=-Nmax)
# (1-based index, cluster score)
nodes = [(i + 1, c[-1]) for i, c in enumerate(eclusters)]
eclusters_x, eclusters_y, scores = zip(*eclusters)
    # represents the constraints over the x-axis and y-axis
constraints_x = get_1D_overlap(eclusters_x, qa)
constraints_y = get_1D_overlap(eclusters_y, qb)
return nodes, constraints_x, constraints_y
def format_lp(nodes, constraints_x, qa, constraints_y, qb):
"""
Maximize
4 x1 + 2 x2 + 3 x3 + x4
Subject To
x1 + x2 <= 1
End
"""
lp_handle = StringIO()
lp_handle.write("Maximize\n ")
records = 0
for i, score in nodes:
lp_handle.write("+ %d x%d " % (score, i))
        # SCIP does not like really long strings per row
records += 1
if records % 10 == 0:
lp_handle.write("\n")
lp_handle.write("\n")
num_of_constraints = 0
lp_handle.write("Subject To\n")
for c in constraints_x:
additions = " + ".join("x%d" % (x + 1) for x in c)
lp_handle.write(" %s <= %d\n" % (additions, qa))
num_of_constraints += len(constraints_x)
# non-self
if not (constraints_x is constraints_y):
for c in constraints_y:
additions = " + ".join("x%d" % (x + 1) for x in c)
lp_handle.write(" %s <= %d\n" % (additions, qb))
num_of_constraints += len(constraints_y)
print(
"number of variables (%d), number of constraints (%d)"
% (len(nodes), num_of_constraints),
file=sys.stderr,
)
lp_handle.write("Binary\n")
for i, score in nodes:
lp_handle.write(" x%d\n" % i)
lp_handle.write("End\n")
lp_data = lp_handle.getvalue()
lp_handle.close()
return lp_data
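# Sketch with toy inputs: two candidate blocks scored 4 and 2 that 1D-overlap on the
# x-axis under quota 1 yield an objective "+ 4 x1 + 2 x2" subject to "x1 + x2 <= 1",
# with x1 and x2 declared Binary:
# lp_text = format_lp([(1, 4), (2, 2)], {(0, 1)}, 1, set(), 1)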
def solve_lp(
clusters,
quota,
work_dir="work",
Nmax=0,
self_match=False,
solver="SCIP",
verbose=False,
):
"""
Solve the formatted LP instance
"""
qb, qa = quota # flip it
nodes, constraints_x, constraints_y = get_constraints(clusters, (qa, qb), Nmax=Nmax)
if self_match:
constraints_x = constraints_y = constraints_x | constraints_y
lp_data = format_lp(nodes, constraints_x, qa, constraints_y, qb)
if solver == "SCIP":
filtered_list = SCIPSolver(lp_data, work_dir, verbose=verbose).results
if not filtered_list:
print("SCIP fails... trying GLPK", file=sys.stderr)
filtered_list = GLPKSolver(lp_data, work_dir, verbose=verbose).results
elif solver == "GLPK":
filtered_list = GLPKSolver(lp_data, work_dir, verbose=verbose).results
if not filtered_list:
print("GLPK fails... trying SCIP", file=sys.stderr)
filtered_list = SCIPSolver(lp_data, work_dir, verbose=verbose).results
return filtered_list
def read_clusters(qa_file, qorder, sorder):
af = AnchorFile(qa_file)
blocks = af.blocks
clusters = []
for block in blocks:
cluster = []
for a, b, score in block:
ia, oa = qorder[a]
ib, ob = sorder[b]
ca, cb = oa.seqid, ob.seqid
cluster.append(((ca, ia), (cb, ib), score))
clusters.append(cluster)
return clusters
def main(args):
p = OptionParser(__doc__)
p.set_beds()
p.add_option(
"--quota",
default="1:1",
help="`quota mapping` procedure -- screen blocks to constrain mapping"
" (useful for orthology), "
"put in the format like (#subgenomes expected for genome X):"
"(#subgenomes expected for genome Y) "
"[default: %default]",
)
p.add_option(
"--Nm",
dest="Nmax",
type="int",
default=10,
help="distance cutoff to tolerate two blocks that are "
"slightly overlapping (cutoff for `quota mapping`) "
"[default: %default units (gene or bp dist)]",
)
supported_solvers = ("SCIP", "GLPK")
p.add_option(
"--self",
dest="self_match",
action="store_true",
default=False,
help="you might turn this on when screening paralogous blocks, "
"esp. if you have reduced mirrored blocks into non-redundant set",
)
p.add_option(
"--solver",
default="SCIP",
choices=supported_solvers,
help="use MIP solver [default: %default]",
)
p.set_verbose(help="Show verbose solver output")
p.add_option(
"--screen",
default=False,
action="store_true",
help="generate new anchors file [default: %default]",
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
qa_file, = args
qbed, sbed, qorder, sorder, is_self = check_beds(qa_file, p, opts)
# sanity check for the quota
if opts.quota:
try:
qa, qb = opts.quota.split(":")
qa, qb = int(qa), int(qb)
        except ValueError:
print(
"quota string should be the form x:x (2:4, 1:3, etc.)", file=sys.stderr
)
sys.exit(1)
if opts.self_match and qa != qb:
raise Exception(
"when comparing genome to itself, "
"quota must be the same number "
"(like 1:1, 2:2) you have %s" % opts.quota
)
quota = (qa, qb)
self_match = opts.self_match
clusters = read_clusters(qa_file, qorder, sorder)
for cluster in clusters:
assert len(cluster) > 0
# below runs `quota mapping`
work_dir = op.join(op.dirname(op.abspath(qa_file)), "work")
selected_ids = solve_lp(
clusters,
quota,
work_dir=work_dir,
Nmax=opts.Nmax,
self_match=self_match,
solver=opts.solver,
verbose=opts.verbose,
)
logging.debug("Selected {0} blocks.".format(len(selected_ids)))
prefix = qa_file.rsplit(".", 1)[0]
suffix = "{0}x{1}".format(qa, qb)
outfile = ".".join((prefix, suffix))
fw = must_open(outfile, "w")
print(",".join(str(x) for x in selected_ids), file=fw)
fw.close()
logging.debug("Screened blocks ids written to `{0}`.".format(outfile))
if opts.screen:
from jcvi.compara.synteny import screen
new_qa_file = ".".join((prefix, suffix, "anchors"))
largs = [qa_file, new_qa_file, "--ids", outfile]
if opts.qbed and opts.sbed:
largs += ["--qbed={0}".format(opts.qbed)]
largs += ["--sbed={0}".format(opts.sbed)]
screen(largs)
if __name__ == "__main__":
main(sys.argv[1:])
| 28.852055
| 88
| 0.600608
|
4a05f1c1b4025f03075abd9ec1d4d3ace24a66ec
| 1,775
|
py
|
Python
|
test/appendixA/test_dFBT.py
|
omelchert/dFBT-FJ
|
31ba7b2733558ad3096fa0de689407a22bbda5e8
|
[
"BSD-3-Clause"
] | 1
|
2020-12-16T10:58:25.000Z
|
2020-12-16T10:58:25.000Z
|
test/appendixA/test_dFBT.py
|
omelchert/dFBT-FJ
|
31ba7b2733558ad3096fa0de689407a22bbda5e8
|
[
"BSD-3-Clause"
] | null | null | null |
test/appendixA/test_dFBT.py
|
omelchert/dFBT-FJ
|
31ba7b2733558ad3096fa0de689407a22bbda5e8
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import dFBT
import scipy.special as scs
import numpy as np
def FBPair():
f = lambda r: np.exp(-r*r/4/np.pi)
F0 = lambda r: np.exp(-r*r*np.pi)*2*np.pi
return f, F0
def eRMS(Fn,Fx):
return np.sqrt(((Fn-Fx)**2).mean()/(Fx*Fx).mean())
class FourierBesselTransformTestCase(unittest.TestCase):
def setUp(self):
self.r = np.linspace(0.0, 20.0, 1000)
self.f, self.Fx = FBPair()
self.T, self.N = 18.0, 20
def tearDown(self):
del self.N, self.T, self.r, self.f, self.Fx
def test_dFBT_fourierPair(self):
rhoFJC,F0FJC,T = dFBT.fwdTrafo(self.r,self.f,self.T,self.N)
F0FJCEx = dFBT.extrapolate(self.r,F0FJC,self.T)
        print(eRMS(F0FJCEx, self.Fx(self.r)))
self.assertLessEqual(eRMS(F0FJCEx,self.Fx(self.r)), 1e-6)
def test_dFBT_selfReciprocality(self):
rhoFJC,F0FJC,T = dFBT.fwdTrafo(self.r,self.f,self.T,self.N)
fFJC = dFBT.bckwdTrafo(self.r,F0FJC,self.T)
        print(eRMS(fFJC, self.f(self.r)))
self.assertLessEqual(eRMS(fFJC,self.f(self.r)), 1e-6)
def test_dFBT_generalizedParsevalTheorem(self):
j = scs.jn_zeros(0,self.N)
Fm = np.zeros(self.N)
Y = lambda m,k: 2.0*scs.j0(j[m]*j[k]/j[-1])/j[-1]/scs.j1(j[k])**2
rhoFJC,F0FJC,T = dFBT.fwdTrafo(self.r,self.f,self.T,self.N)
fk = F0FJC*2./(self.T*self.T*scs.j1(j)**2)
for im in range(self.N):
for ik in range(self.N):
Fm[im] += Y(im,ik)*fk[ik]
fkScaled = fk /scs.j1(j)
FmScaled = Fm /scs.j1(j)
        print(np.sum(FmScaled**2) - np.sum(fkScaled**2))
self.assertAlmostEqual(np.sum(FmScaled**2), np.sum(fkScaled**2), 6)
if __name__ == "__main__":
unittest.main()
| 29.098361
| 75
| 0.591549
|
4a05f1ca1ef669ad3006a53f2b09e833ffd0c47c
| 1,430
|
py
|
Python
|
workflow/notebooks/analysis/scripts/imports.py
|
CambridgeSemiticsLab/BH_time_collocations
|
2d1864b6e9cd26624c769ee1e970d69d19da7fbf
|
[
"CC-BY-4.0"
] | 5
|
2019-06-19T19:42:21.000Z
|
2021-04-20T22:43:45.000Z
|
workflow/notebooks/analysis/scripts/imports.py
|
CambridgeSemiticsLab/BHTenseAndAspect
|
2d1864b6e9cd26624c769ee1e970d69d19da7fbf
|
[
"CC-BY-4.0"
] | 2
|
2020-02-25T10:19:40.000Z
|
2020-03-13T15:29:01.000Z
|
workflow/notebooks/analysis/scripts/imports.py
|
CambridgeSemiticsLab/BHTenseAndAspect
|
2d1864b6e9cd26624c769ee1e970d69d19da7fbf
|
[
"CC-BY-4.0"
] | null | null | null |
"""
Standard imports for the analysis notebooks.
"""
import re
import json
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
pd.set_option('display.max_rows', 200)
idx = pd.IndexSlice
from adjustText import adjust_text
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
scaler = StandardScaler()
from scripts.stats.pca import apply_pca
from scripts.stats import significance as sig
from scripts.df_styles import TextShower, df_highlighter
from scripts.counting import pivot_ct, join_ct_pr
import numpy as np
from bidi.algorithm import get_display as bidi_get_display
def remove_shindots(string):
"""Remove dots from ש"""
return(
string
.replace('\u05c1', '')
.replace('\u05c2', '')
)
# modified get_display that strips all pointing/accents, keeping only base Hebrew letters
def get_display(string):
rem_accents = ''.join(re.findall('[\u05D0-\u05EA]', string))
return bidi_get_display(rem_accents)
# latex tags
def textheb(string):
return '\\texthebrew{%s}'%string
# custom modules
from .paths import paths
from .export import Exporter
from .plotting import heatmap
from .string_refs import get_verserefs
# load the data
df = pd.read_csv(paths['time_dataset'], index_col='node')
df_sg = df.query("(n_times == 1) and (is_advb == False)").copy()
# pretty-show Hebrew text from a df
ts = TextShower(
default=['verse', 'clause'],
stylize=['clause']
)
| 25.535714
| 64
| 0.731469
|
4a05f2046d838c38e363d8c7f5389e6438f1b560
| 10,045
|
py
|
Python
|
process/LidarAutoCalibration.py
|
sameeptandon/sail-car-log
|
0ee3d598bb09d389bcbd2ebf73cd4b2411e796be
|
[
"BSD-2-Clause"
] | 1
|
2021-02-24T03:11:13.000Z
|
2021-02-24T03:11:13.000Z
|
process/LidarAutoCalibration.py
|
sameeptandon/sail-car-log
|
0ee3d598bb09d389bcbd2ebf73cd4b2411e796be
|
[
"BSD-2-Clause"
] | null | null | null |
process/LidarAutoCalibration.py
|
sameeptandon/sail-car-log
|
0ee3d598bb09d389bcbd2ebf73cd4b2411e796be
|
[
"BSD-2-Clause"
] | 3
|
2015-03-18T14:36:04.000Z
|
2018-07-04T02:57:24.000Z
|
from LidarTransforms import *
import sys, os
from VideoReader import *
import cv2
from cv2 import imshow, waitKey
from numpy.linalg import norm
from ColorMap import *
from numpy import exp, log, sqrt
from transformations import euler_matrix
import scipy.weave
import itertools
from ArgParser import *
def computeDistanceTransform(D, gamma, alpha):
logD = np.log(D);
logD = logD.astype(np.float32)
logD = computeLogDistanceTransform(logD, gamma)
F = np.exp(logD)
return alpha*D + (1-alpha)*F
def computeLogDistanceTransformSlow(D, gamma):
# assume that D is logarithmic in the edges
width = D.shape[0]
height = D.shape[1]
lg = log(gamma)
for x in range(1,width):
for y in range(1,height):
D[x,y] = max(D[x,y], D[x-1,y]+lg, D[x,y-1]+lg, D[x-1,y-1]+lg)
for x in reversed(range(width-1)):
for y in reversed(range(height-1)):
D[x,y] = max(D[x,y], D[x+1,y]+lg, D[x,y+1]+lg, D[x+1,y+1]+lg)
#print D
return D
def computeLogDistanceTransform(D, gamma):
# assume that D is logarithmic in the edges
width = D.shape[0]
height = D.shape[1]
lg = log(gamma)
code = \
"""
using namespace std;
for (int x = 1; x < width; x++) {
for (int y = 1; y < height; y++) {
float l = lg;
float p1 = D(x,y);
float p2 = D(x-1,y) + l;
float p3 = D(x,y-1) + l;
float p4 = D(x-1,y-1) + l;
D(x,y) = max(p1,max(p2,max(p3,p4)));
}
}
for (int x = width-2; x >= 0 ; x--) {
for (int y = height-2; y >= 0; y--) {
float l = lg;
float p1 = D(x,y);
float p2 = D(x+1,y) + l;
float p3 = D(x,y+1) + l;
float p4 = D(x+1,y+1) + l;
D(x,y) = max(p1,max(p2,max(p3,p4)));
}
}
"""
scipy.weave.inline(code, ['D', 'width', 'height', 'lg'], headers=['<algorithm>'],
type_converters=scipy.weave.converters.blitz)
return D
def generateEdgeFilterKernels():
kernels = []
for x in range(3):
for y in range(3):
K = np.zeros((3,3))
K[1,1] = 1.0
K[x,y] = -1.0
if (x != 1 and y != 1):
kernels.append(K)
return kernels
def processPointCloud(raw_pts):
# add rotational angle and distance to pts
pts = np.zeros((raw_pts.shape[0], raw_pts.shape[1]+2), dtype=np.float32)
pts[:,:-2] = raw_pts
pts[:,-2] = np.arctan2(pts[:,1], pts[:,0]) + np.pi
pts[:,-1] = np.sqrt(np.sum( pts[:, 0:3] ** 2, axis=1 ))
pts = pts[ pts[:,-2].argsort() ] # sort on rotational angle
pts = pts[ pts[:,4].argsort(kind='mergesort') ] # stable sort on laser num
pts[0,3] = 0.0;
pts[-1,3] = 0.0
"""
pts[1:-1,3] = np.maximum(pts[0:-2,-1] - pts[1:-1, -1],
pts[2:, -1] - pts[1:-1, -1])
pts[1:-1,3] = np.maximum(pts[1:-1,3], 0)
"""
for idx in range(1,pts.shape[0]-1):
if pts[idx,4] == pts[idx-1,4] and pts[idx,4] == pts[idx+1,4]:
pts[idx,3] = max(pts[idx-1,-1] - pts[idx,-1],
pts[idx+1,-1] - pts[idx,-1],
0)
else:
pts[idx,3] = 0.0
#pts = pts[pts[:,0] > 0, :]
pts = pts[pts[:,3] > 2.0, :]
return pts
def computeReprojection(C, raw_pts, cam):
pts = raw_pts[:, 0:3].copy()
pts[:, 0] += C[0]
pts[:, 1] += C[1]
pts[:, 2] += C[2]
R = euler_matrix(C[3], C[4], C[5])[0:3,0:3]
pts_wrt_cam = np.dot(R, np.dot(R_to_c_from_l_old(cam), pts.transpose()))
pix = np.around(np.dot(cam['KK'], np.divide(pts_wrt_cam[0:3,:], pts_wrt_cam[2, :])))
pix = pix.astype(np.int32)
return (pix, pts_wrt_cam)
def computeMask(pix, pts_wrt_cam):
width = 8
mask = np.logical_and(True, pix[0,:] > 0 + width / 2)
mask = np.logical_and(mask, pix[1,:] > 0 + width / 2)
mask = np.logical_and(mask, pix[0,:] < 2080 - width / 2)
mask = np.logical_and(mask, pix[1,:] < 1552 - width / 2)
mask = np.logical_and(mask, pts_wrt_cam[2,:] > 0)
return mask
def computeReprojectionScore(C, pts, I, cam):
(pix, pts_wrt_cam) = computeReprojection(C, pts, cam)
mask = computeMask(pix, pts_wrt_cam)
px = pix[1,mask]
py = pix[0,mask]
return np.sum(I[px,py])
def gridsearch(C, batch, cam):
m = range(-1,2)
step_t = 0.01
step_r = 0.003
best_score = -float("inf")
best_d = None
#scores = np.zeros((729,1))
scores = np.zeros((3**3,1))
idx = 0
for delta in itertools.product(m, repeat=3):
#(dtx, dty, dtz, drx, dry, drz) = delta
#d = np.array([step_t*dtx, step_t*dty, step_t*dtz, step_r*drx, step_r*dry, step_r*drz])
(drx, dry, drz) = delta
d = step_r * np.array([drx, dry, drz])
C_new = C.copy()
C_new[3:6] += d
score = 0
for p in batch:
E = p[2]
proc_pts = p[3]
score = score + computeReprojectionScore(C_new, proc_pts, E, cam)
scores[idx] = score
if score > best_score:
best_score = score
best_C = C_new.copy()
if np.all(np.array(delta)) == 0:
current_score = score
idx = idx + 1
print scores
print current_score
if np.sum( scores > current_score ) >= 3**3 / 2:
return (best_C, best_score)
else:
return (C, current_score)
def drawReprojection(C, pts, I, cam):
(pix, pts_wrt_cam) = computeReprojection(C, pts, cam)
mask = computeMask(pix, pts_wrt_cam)
px = pix[1,mask]
py = pix[0,mask]
intensity = pts[mask, 3]
colors = heatColorMapFast(intensity, 0, 100)
I[px,py,:] = colors[0,:,:]
imshow('display', I)
waitKey(10)
def getNextData(reader, LDRFrameMap):
for idx in range(15):
(success, img) = reader.getNextFrame()
#img = cv2.flip(img,-1)
if not success:
reader.setFrame(3)
ldr_frame = loadLDR(LDRFrameMap[reader.framenum])
return (success, img, ldr_frame)
def processData(data):
I, pts = data
E = processImage(I);
proc_pts = processPointCloud(pts)
dist = np.sqrt(np.sum( proc_pts[:, 0:3] ** 2, axis = 1))
proc_pts = proc_pts[ dist > 3, : ]
#proc_pts = proc_pts[ proc_pts[:, 3] > 2.0, :]
return [I, pts, E, proc_pts]
def processBatch(batch):
processed = [ ]
count = 0
for data in batch:
print 'Processing:', count, 'out of', len(batch)
count += 1
output = processData(data)
processed.append(output)
return processed
def gauss_filt(sigma):
# Isotropic
w = 2 * int(np.ceil(sigma))
G = np.array(xrange(-w, w + 1)) ** 2
G = G.reshape((G.size, 1)) + G
G = np.exp(-G / (2.0 * sigma * sigma))
G /= np.sum(G)
return G
def dgauss_filt(sigma):
'''
Generate a derivative of Gaussian filter in x (left-to-right)
and y (top-to-bottom) directions
'''
G = gauss_filt(sigma)
G_y, G_x = np.gradient(G)
G_x *= 2.0 / np.sum(np.abs(G_x))
G_y *= 2.0 / np.sum(np.abs(G_y))
return G_x, G_y
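# Sketch: for sigma = 1.0 both kernels are 5x5 (w = 2*ceil(sigma) samples on each side)
# and each is scaled so that its absolute values sum to 2:
# G_x, G_y = dgauss_filt(1.0)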
"""
def processImage(I):
from scipy.signal import convolve2d
E = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
G_x, G_y = dgauss_filt(0.02)
I_x = -convolve2d(E, G_x, mode='same')
I_y = -convolve2d(E, G_y, mode='same')
I_mag = np.sqrt(I_x ** 2 + I_y ** 2)
edges = computeDistanceTransform(I_mag, 0.98, 1.0/2.0)
return edges
"""
def processImage(I):
kernels = generateEdgeFilterKernels()
# convert the image to grayscale
E = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
# run an edge filter
edges = cv2.filter2D(E, cv2.CV_8U, np.zeros((1,1)))
for k in kernels:
edges = np.maximum(edges, np.abs(cv2.filter2D(E, cv2.CV_8U, k)))
edges = computeDistanceTransform(edges+1, 0.98, 1.0/1.8)
return edges
if __name__ == '__main__':
args = parse_args(sys.argv[1], sys.argv[2])
cam_num = int(sys.argv[2][-5])
video_file = args['video']
params = args['params']
cam = params['cam'][cam_num-1]
video_reader = VideoReader(video_file)
ldr_map = loadLDRCamMap(args['map'])
#(tx,ty,tz) = (-0.50000000000000004, -0.2875, 0.34)
(tx,ty,tz) = (-0.50000000000000004, 0.03, 0.34)
(rx,ry,rz) = (0.0,0.0,0.0)
C_current = np.array([tx,ty,tz,rx,ry,rz])
BATCH_SIZE = 30
from multiprocessing import Pool
pool = Pool(10)
while True:
batch_data = [ ]
while len(batch_data) < BATCH_SIZE:
(success, I, raw_pts) = getNextData(video_reader, ldr_map)
if not success:
break
batch_data.append( [I.copy(), raw_pts] )
batch_data = pool.map(processData, batch_data)
#batch_data = processBatch(batch_data)
count = 0
while count < 20:
count +=1
out = gridsearch(C_current, batch_data, cam)
print out[1]
print out[0]
if np.all(C_current == out[0]):
break
C_current = out[0]
for idx in range(len(batch_data)):
if idx != len(batch_data)-1:
continue
proc_pts = batch_data[idx][3]
(pix, pts_wrt_cam) = computeReprojection(C_current, proc_pts, cam)
mask = computeMask(pix, pts_wrt_cam)
px = pix[1,mask]
py = pix[0,mask]
pts = batch_data[idx][1]
#drawReprojection(C_current, pts, batch_data[idx][0].copy(), cam)
E_show = batch_data[idx][2].copy()
for p in range(4):
E_show[px+p,py] = 255
E_show[px,py+p] = 255
E_show[px-p,py] = 255
E_show[px,py-p] = 255
imshow('viz', cv2.pyrDown(E_show/255.0))
waitKey(5)
#imshow('display', I)
#key = chr((waitKey() & 255))
| 29.544118
| 95
| 0.534793
|
4a05f4ad6d27ff369daa090e737e5c9ca3268ba9
| 9,631
|
py
|
Python
|
modules/graph_age_params.py
|
enflujo/COVID_schools_dashboard
|
702c9c3c91938e514e56f4cf6f325ed954d7bc3e
|
[
"Apache-2.0"
] | null | null | null |
modules/graph_age_params.py
|
enflujo/COVID_schools_dashboard
|
702c9c3c91938e514e56f4cf6f325ed954d7bc3e
|
[
"Apache-2.0"
] | null | null | null |
modules/graph_age_params.py
|
enflujo/COVID_schools_dashboard
|
702c9c3c91938e514e56f4cf6f325ed954d7bc3e
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np2
def build(args):
# Get medians
def get_medians(df_p, last):
df_res = df_p.iloc[-last:].groupby(["param"]).median().reset_index()["median"][0]
return df_res
def medians_params(df_list, age_group, last):
params_def = ["age", "beta", "IFR", "RecPeriod", "alpha", "sigma"]
params_val = [
age_group,
get_medians(df_list[0], last),
get_medians(df_list[1], last),
get_medians(df_list[2], last),
get_medians(df_list[3], last),
get_medians(df_list[4], last),
]
res = dict(zip(params_def, params_val))
return res
params_data_BOG = pd.read_csv(args.params_data_path, encoding="unicode_escape", delimiter=",")
# Ages 0-19
young_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "0-19"])
young_ages_beta = pd.DataFrame(young_ages_params[young_ages_params["param"] == "contact_rate"])
young_ages_IFR = pd.DataFrame(young_ages_params[young_ages_params["param"] == "IFR"])
young_ages_RecPeriod = pd.DataFrame(young_ages_params[young_ages_params["param"] == "recovery_period"])
young_ages_alpha = pd.DataFrame(young_ages_params[young_ages_params["param"] == "report_rate"])
young_ages_sigma = pd.DataFrame(young_ages_params[young_ages_params["param"] == "relative_asymp_transmission"])
young_params = [young_ages_beta, young_ages_IFR, young_ages_RecPeriod, young_ages_alpha, young_ages_sigma]
# Ages 20-39
youngAdults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "20-39"])
youngAdults_ages_beta = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params["param"] == "contact_rate"])
youngAdults_ages_IFR = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params["param"] == "IFR"])
youngAdults_ages_RecPeriod = pd.DataFrame(
youngAdults_ages_params[youngAdults_ages_params["param"] == "recovery_period"]
)
youngAdults_ages_alpha = pd.DataFrame(youngAdults_ages_params[youngAdults_ages_params["param"] == "report_rate"])
youngAdults_ages_sigma = pd.DataFrame(
youngAdults_ages_params[youngAdults_ages_params["param"] == "relative_asymp_transmission"]
)
youngAdults_params = [
youngAdults_ages_beta,
youngAdults_ages_IFR,
youngAdults_ages_RecPeriod,
youngAdults_ages_alpha,
youngAdults_ages_sigma,
]
# Ages 40-49
adults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "40-49"])
adults_ages_beta = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "contact_rate"])
adults_ages_IFR = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "IFR"])
adults_ages_RecPeriod = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "recovery_period"])
adults_ages_alpha = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "report_rate"])
adults_ages_sigma = pd.DataFrame(adults_ages_params[adults_ages_params["param"] == "relative_asymp_transmission"])
adults_params = [adults_ages_beta, adults_ages_IFR, adults_ages_RecPeriod, adults_ages_alpha, adults_ages_sigma]
# Ages 50-59
seniorAdults_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "50-59"])
seniorAdults_ages_beta = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params["param"] == "contact_rate"])
seniorAdults_ages_IFR = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params["param"] == "IFR"])
seniorAdults_ages_RecPeriod = pd.DataFrame(
seniorAdults_ages_params[seniorAdults_ages_params["param"] == "recovery_period"]
)
seniorAdults_ages_alpha = pd.DataFrame(seniorAdults_ages_params[seniorAdults_ages_params["param"] == "report_rate"])
seniorAdults_ages_sigma = pd.DataFrame(
seniorAdults_ages_params[seniorAdults_ages_params["param"] == "relative_asymp_transmission"]
)
seniorAdults_params = [
seniorAdults_ages_beta,
seniorAdults_ages_IFR,
seniorAdults_ages_RecPeriod,
seniorAdults_ages_alpha,
seniorAdults_ages_sigma,
]
# Ages 60-69
senior_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "60-69"])
senior_ages_beta = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "contact_rate"])
senior_ages_IFR = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "IFR"])
senior_ages_RecPeriod = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "recovery_period"])
senior_ages_alpha = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "report_rate"])
senior_ages_sigma = pd.DataFrame(senior_ages_params[senior_ages_params["param"] == "relative_asymp_transmission"])
senior_params = [senior_ages_beta, senior_ages_IFR, senior_ages_RecPeriod, senior_ages_alpha, senior_ages_sigma]
# Ages 70+
elderly_ages_params = pd.DataFrame(params_data_BOG[params_data_BOG["age_group"] == "70-90+"])
elderly_ages_beta = pd.DataFrame(elderly_ages_params[elderly_ages_params["param"] == "contact_rate"])
elderly_ages_IFR = pd.DataFrame(elderly_ages_params[elderly_ages_params["param"] == "IFR"])
elderly_ages_RecPeriod = pd.DataFrame(elderly_ages_params[elderly_ages_params["param"] == "recovery_period"])
elderly_ages_alpha = pd.DataFrame(elderly_ages_params[elderly_ages_params["param"] == "report_rate"])
elderly_ages_sigma = pd.DataFrame(
elderly_ages_params[elderly_ages_params["param"] == "relative_asymp_transmission"]
)
elderly_params = [
elderly_ages_beta,
elderly_ages_IFR,
elderly_ages_RecPeriod,
elderly_ages_alpha,
elderly_ages_sigma,
]
young_params_medians = medians_params(young_params, "0-19", last=15) # Schools
youngAdults_params_medians = medians_params(youngAdults_params, "20-39", last=15) # Adults
adults_params_medians = medians_params(adults_params, "40-49", last=15) # Adults
seniorAdults_params_medians = medians_params(seniorAdults_params, "50-59", last=15) # Adults
senior_params_medians = medians_params(senior_params, "60-69", last=15) # Elders
elderly_params_medians = medians_params(elderly_params, "70-90+", last=15) # Elders
# Simplify, get medians of values
params_desc = ["age", "beta", "IFR", "RecPeriod", "alpha", "sigma"]
main_adults_params_values = [
"20-59",
np2.median(
[youngAdults_params_medians["beta"], adults_params_medians["beta"], seniorAdults_params_medians["beta"]]
),
np2.median(
[youngAdults_params_medians["IFR"], adults_params_medians["IFR"], seniorAdults_params_medians["IFR"]]
),
np2.median(
[
youngAdults_params_medians["RecPeriod"],
adults_params_medians["RecPeriod"],
seniorAdults_params_medians["RecPeriod"],
]
),
np2.median(
[youngAdults_params_medians["alpha"], adults_params_medians["alpha"], seniorAdults_params_medians["alpha"]]
),
np2.median(
[youngAdults_params_medians["sigma"], adults_params_medians["sigma"], seniorAdults_params_medians["sigma"]]
),
]
main_adults_params_medians = dict(zip(params_desc, main_adults_params_values))
main_elders_params_values = [
"60-90+",
np2.median([senior_params_medians["beta"], elderly_params_medians["beta"]]),
np2.median([senior_params_medians["IFR"], elderly_params_medians["IFR"]]),
np2.median([senior_params_medians["RecPeriod"], elderly_params_medians["RecPeriod"]]),
np2.median([senior_params_medians["alpha"], elderly_params_medians["alpha"]]),
np2.median([senior_params_medians["sigma"], elderly_params_medians["sigma"]]),
]
main_elders_params_medians = dict(zip(params_desc, main_elders_params_values))
# Define parameters per layers
def calculate_R0(IFR, alpha, beta, RecPeriod, sigma):
return (1 - IFR) * (alpha * beta * RecPeriod + (1 - alpha) * beta * sigma * RecPeriod)
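    # Worked check (editor addition, illustrative numbers only): with
    # IFR=0.01, alpha=0.5, beta=0.6, RecPeriod=5 and sigma=0.5 the expression
    # gives (1 - 0.01) * (0.5*0.6*5 + 0.5*0.6*0.5*5) = 0.99 * 2.25 = 2.2275,
    # i.e. R0 = (1 - IFR) * beta * RecPeriod * (alpha + (1 - alpha) * sigma).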
def model_params(params_dict, layer):
layer_params = {
"layer": layer,
"RecPeriod": params_dict["RecPeriod"],
"R0": calculate_R0(
params_dict["IFR"],
params_dict["alpha"],
params_dict["beta"],
params_dict["RecPeriod"],
params_dict["sigma"],
),
}
return layer_params
school_params = model_params(young_params_medians, "schools")
adults_params = model_params(main_adults_params_medians, "adults")
elders_params = model_params(main_elders_params_medians, "elders")
params_def = ["layer", "RecPeriod", "R0"]
run_params = [
[school_params["layer"], adults_params["layer"], elders_params["layer"]],
[school_params["RecPeriod"], adults_params["RecPeriod"], elders_params["RecPeriod"]],
[school_params["R0"], adults_params["R0"], elders_params["R0"]],
]
run_params = dict(zip(params_def, run_params))
return run_params
def cache(args):
########################### Static params ################################################
params = {
"bogota": {
"layer": ["schools", "adults", "elders"],
"RecPeriod": [3.4474289566430567, 3.199665313607661, 3.5877699639670877],
"R0": [2.341839924665767, 2.4098569613929888, 2.404539370553576],
}
}
return pd.DataFrame(params[args.city])
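# Hedged usage sketch (editor addition, not part of the original module): both
# entry points only read attributes from ``args`` (``params_data_path`` for
# build(), ``city`` for cache()), so any namespace object works. The CSV path
# below is a placeholder, not a real project file.
#
#   from types import SimpleNamespace
#   static_df = cache(SimpleNamespace(city="bogota"))
#   fitted = build(SimpleNamespace(params_data_path="age_params.csv"))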
| 49.137755
| 120
| 0.692555
|
4a05f4d2a6c88116db4d09a27db737b1ba69ded3
| 78
|
py
|
Python
|
plugins/rapid7_tcell/komand_rapid7_tcell/actions/list_inline_scripts/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/rapid7_tcell/komand_rapid7_tcell/actions/list_inline_scripts/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/rapid7_tcell/komand_rapid7_tcell/actions/list_inline_scripts/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import ListInlineScripts
| 26
| 39
| 0.794872
|
4a05f55cc3d1dda1d8422d44ce69cad96b69b695
| 9,159
|
py
|
Python
|
cloudflare/tests/unit/test__hook.py
|
DriesSchulten/dehydrated-pi
|
5700b736f60b47a729538b5515728d14f1c58d57
|
[
"MIT"
] | 1
|
2022-02-23T16:25:48.000Z
|
2022-02-23T16:25:48.000Z
|
cloudflare/tests/unit/test__hook.py
|
DriesSchulten/dehydrated-pi
|
5700b736f60b47a729538b5515728d14f1c58d57
|
[
"MIT"
] | null | null | null |
cloudflare/tests/unit/test__hook.py
|
DriesSchulten/dehydrated-pi
|
5700b736f60b47a729538b5515728d14f1c58d57
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import json
import os
import re
import tempfile
import mock
import requests_mock
from six.moves.urllib import parse as urlparse
import testtools
# Setup dummy environment variables so 'hook' can be imported
os.environ['CF_EMAIL'] = "email@example.com"
os.environ['CF_KEY'] = "a_cloudflare_example_key"
import hook # noqa
CF_API_HOST = "api.cloudflare.com"
CF_API_PATH = "/client/v4"
CF_API_SCHEME = "https"
class TestBase(testtools.TestCase):
def setUp(self):
super(TestBase, self).setUp()
self.expected_headers = {
'Content-Type': 'application/json',
            'X-Auth-Email': "email@example.com",
'X-Auth-Key': 'a_cloudflare_example_key',
}
ExpectedRequestsData = collections.namedtuple(
'ExpectedRequestsData', ['method', 'path', 'query', 'json_body'])
@requests_mock.Mocker()
class TestRequestCallers(TestBase):
def setUp(self):
super(TestRequestCallers, self).setUp()
self.matcher = re.compile(r'^https://api.cloudflare.com/client/v4/')
def _validate_requests_calls(self, mock_request, expected_data_list):
"""Helper function to check values of calls to requests"""
# Make sure our call count matches up with what we expect
self.assertEqual(len(expected_data_list), mock_request.call_count)
for index, expected_data in enumerate(expected_data_list):
# Provide a bit more info if a test fails
expected_str = "Info: {}".format(expected_data)
request_obj = mock_request.request_history[index]
parsed_url = urlparse.urlparse(request_obj.url)
self.assertEqual(expected_data.method.upper(),
request_obj.method)
self.assertEqual(CF_API_SCHEME, parsed_url.scheme)
self.assertEqual(CF_API_HOST, parsed_url.netloc)
self.assertEqual(
"{}/{}".format(CF_API_PATH, expected_data.path),
parsed_url.path)
self.assertEqual(expected_data.query, request_obj.qs,
expected_str)
if expected_data.json_body is not None:
self.assertEqual(expected_data.json_body,
json.loads(request_obj._request.body),
expected_str)
def test__get_zone_id(self, mock_request):
expected_list = [
ExpectedRequestsData(
method='get',
path="zones",
query={'name': ['example.com']},
json_body=None,
),
]
mock_request.get(self.matcher, text=ZONE_RESPONSE)
auth, result = hook._get_zone_id("example.com")
expected_id = "023e105f4ecef8ad9ca31a8372d0c353"
self.assertEqual(expected_id, result)
self._validate_requests_calls(mock_request=mock_request,
expected_data_list=expected_list)
def test__get_txt_record_id_found(self, mock_request):
expected_list = [
ExpectedRequestsData(
method='get',
path='zones/ZONE_ID/dns_records',
query={'content': ['token'], 'name': ['example.com'],
'type': ['txt']},
json_body=None,
),
]
mock_request.get(self.matcher, text=DNS_RECORDS_RESPONSE)
result = hook._get_txt_record_id({}, "ZONE_ID", "example.com", "TOKEN")
expected_id = "372e67954025e0ba6aaa6d586b9e0b59"
self.assertEqual(expected_id, result)
self._validate_requests_calls(mock_request=mock_request,
expected_data_list=expected_list)
def test__get_txt_record_id_not_found(self, mock_request):
expected_list = [
ExpectedRequestsData(
method='get',
path="zones/ZONE_ID/dns_records",
query={'content': ['token'], 'name': ['example.com'],
'type': ['txt']},
json_body=None,
),
]
mock_request.get(self.matcher, text=DNS_RECORDS_RESPONSE_NOT_FOUND)
result = hook._get_txt_record_id({}, "ZONE_ID", "example.com", "TOKEN")
self.assertEqual(None, result)
self._validate_requests_calls(mock_request=mock_request,
expected_data_list=expected_list)
    @mock.patch.object(hook, '_get_txt_record_id',
                       lambda auth, zone_id, name, token: None)
def test_create_txt_record(self, mock_request):
expected_list = [
ExpectedRequestsData(
method='get',
path="zones",
query={'name': ['example.com']},
json_body=None,
),
ExpectedRequestsData(
method='post',
path=("zones/023e105f4ecef8ad9ca31a8372d0c353/"
"dns_records"),
query={},
json_body={'content': 'TOKEN', 'type': 'TXT', 'ttl': 120,
'name': '_acme-challenge.example.com',
},
)
]
mock_request.get(self.matcher, text=ZONE_RESPONSE)
mock_request.post(self.matcher, text=CREATE_DNS_RECORD_RESPONSE)
args = ['example.com', 'CHALLENGE', 'TOKEN']
result = hook.create_txt_record(args)
self._validate_requests_calls(mock_request=mock_request,
expected_data_list=expected_list)
self.assertEqual(None, result)
# Sample responses
ZONE_RESPONSE = """
{
"success": true,
"errors": [
{}
],
"messages": [
{}
],
"result": [
{
"id": "023e105f4ecef8ad9ca31a8372d0c353",
"name": "example.com",
"development_mode": 7200,
"original_name_servers": [
"ns1.originaldnshost.com",
"ns2.originaldnshost.com"
],
"original_registrar": "GoDaddy",
"original_dnshost": "NameCheap",
"created_on": "2014-01-01T05:20:00.12345Z",
"modified_on": "2014-01-01T05:20:00.12345Z",
"owner": {
"id": "7c5dae5552338874e5053f2534d2767a",
"email": "user@example.com",
"owner_type": "user"
},
"permissions": [
"#zone:read",
"#zone:edit"
],
"plan": {
"id": "e592fd9519420ba7405e1307bff33214",
"name": "Pro Plan",
"price": 20,
"currency": "USD",
"frequency": "monthly",
"legacy_id": "pro",
"is_subscribed": true,
"can_subscribe": true
},
"plan_pending": {
"id": "e592fd9519420ba7405e1307bff33214",
"name": "Pro Plan",
"price": 20,
"currency": "USD",
"frequency": "monthly",
"legacy_id": "pro",
"is_subscribed": true,
"can_subscribe": true
},
"status": "active",
"paused": false,
"type": "full",
"name_servers": [
"tony.ns.cloudflare.com",
"woz.ns.cloudflare.com"
]
}
],
"result_info": {
"page": 1,
"per_page": 20,
"count": 1,
"total_count": 2000
}
}
"""
DNS_RECORDS_RESPONSE = """
{
"success": true,
"errors": [],
"messages": [],
"result": [
{
"id": "372e67954025e0ba6aaa6d586b9e0b59",
"type": "TXT",
"name": "_acme-challenge.test.example.com",
"content": "WyIlYaKOp62zaDu_JDKwfXVCnr4q4ntYtmkZ3y5BF2w",
"proxiable": false,
"proxied": false,
"ttl": 120,
"locked": false,
"zone_id": "023e105f4ecef8ad9ca31a8372d0c353",
"zone_name": "example.com",
"created_on": "2014-01-01T05:20:00.12345Z",
"modified_on": "2014-01-01T05:20:00.12345Z",
"data": {}
}
],
"result_info": {
"page": 1,
"per_page": 20,
"count": 1,
"total_count": 2000
}
}
"""
DNS_RECORDS_RESPONSE_NOT_FOUND = """
{
"success": true,
"errors": [],
"messages": [],
"result": [],
"result_info": {
"page": 1,
"per_page": 20,
"count": 1,
"total_count": 2000
}
}
"""
CREATE_DNS_RECORD_RESPONSE = """
{
"success": true,
"errors": [
{}
],
"messages": [
{}
],
"result": {
"id": "372e67954025e0ba6aaa6d586b9e0b59",
"type": "A",
"name": "example.com",
"content": "1.2.3.4",
"proxiable": true,
"proxied": false,
"ttl": 120,
"locked": false,
"zone_id": "023e105f4ecef8ad9ca31a8372d0c353",
"zone_name": "example.com",
"created_on": "2014-01-01T05:20:00.12345Z",
"modified_on": "2014-01-01T05:20:00.12345Z",
"data": {}
}
}
"""
| 29.737013
| 79
| 0.554318
|
4a05f67f820c17453193e913343b790ebd144787
| 3,409
|
py
|
Python
|
bin/MSVC-Setup.py
|
antonelloceravola/ToolBOSCore
|
b03414a867a9f0585e06bb8e4f299c4be1357f3a
|
[
"BSD-3-Clause"
] | null | null | null |
bin/MSVC-Setup.py
|
antonelloceravola/ToolBOSCore
|
b03414a867a9f0585e06bb8e4f299c4be1357f3a
|
[
"BSD-3-Clause"
] | null | null | null |
bin/MSVC-Setup.py
|
antonelloceravola/ToolBOSCore
|
b03414a867a9f0585e06bb8e4f299c4be1357f3a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Configures the user's shell environment to use MSVC (with Wine)
#
# Copyright (c) Honda Research Institute Europe GmbH
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
#----------------------------------------------------------------------------
# Includes
#----------------------------------------------------------------------------
import logging
import os
from ToolBOSCore.Settings.UserSetup import setupMSVC
from ToolBOSCore.Util import ArgsManagerV2
from ToolBOSCore.Settings import ToolBOSSettings
#----------------------------------------------------------------------------
# Commandline parsing
#----------------------------------------------------------------------------
desc = "Configures the user's shell environment to use MSVC (with Wine)."
argman = ArgsManagerV2.ArgsManager( desc )
argman.addArgument( '-p', '--path',
help='Wine config directory (default: $HOME/.wine)' )
argman.addArgument( '-m', '--msvc-version', type=int,
help='SDK version to setup (default: 2017)' )
argman.addExample( '%(prog)s' )
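# Illustrative invocations (editor addition; the option names are the ones
# registered above, the concrete values are placeholders):
#
#   MSVC-Setup.py                          # defaults: $HOME/.wine, configured msvcVersion
#   MSVC-Setup.py -p ~/.wine-msvc -m 2017  # explicit Wine prefix and SDK version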
args = vars( argman.run() )
path = args['path']
version = args['msvc_version']
if not path:
path = os.path.expandvars( '${HOME}/.wine' )
if not version:
    version = ToolBOSSettings.getConfigOption( 'msvcVersion' )
#----------------------------------------------------------------------------
# Main program
#----------------------------------------------------------------------------
logging.info( 'Wine config directory: %s', path )
if version not in ( 2008, 2010, 2012, 2017 ):
logging.error( 'Unsupported MSVC version %s', version )
try:
setupMSVC( path, version )
logging.info( 'OK, MSVC compiler is ready.' )
except ( EnvironmentError, OSError, ValueError ) as details:
logging.error( details )
logging.error( 'MSVC setup failed!' )
# EOF
| 34.434343
| 77
| 0.622177
|
4a05f6880a67b163e419871abffe6de9f36489c9
| 5,903
|
py
|
Python
|
tensorflow_probability/python/distributions/chi.py
|
cafeal/probability
|
f968a32d601d29ec31a10568ccfe30263cf91ef2
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/chi.py
|
cafeal/probability
|
f968a32d601d29ec31a10568ccfe30263cf91ef2
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/chi.py
|
cafeal/probability
|
f968a32d601d29ec31a10568ccfe30263cf91ef2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Chi distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import invert as invert_bijector
from tensorflow_probability.python.bijectors import square as square_bijector
from tensorflow_probability.python.distributions import chi2
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import tensor_util
class Chi(transformed_distribution.TransformedDistribution):
"""Chi distribution.
The Chi distribution is defined over nonnegative real numbers and uses a
degrees of freedom ('df') parameter.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; df, x >= 0) = x**(df - 1) exp(-0.5 x**2) / Z
Z = 2**(0.5 df - 1) Gamma(0.5 df)
```
where:
* `df` denotes the degrees of freedom,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The Chi distribution is a transformation of the Chi2 distribution; it is the
distribution of the positive square root of a variable obeying a Chi2
distribution.
"""
def __init__(self,
df,
validate_args=False,
allow_nan_stats=True,
name='Chi'):
"""Construct Chi distributions with parameter `df`.
Args:
df: Floating point tensor, the degrees of freedom of the
distribution(s). `df` must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value `NaN` to indicate the result
is undefined. When `False`, an exception is raised if one or more of the
statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Default value: `'Chi'`.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([df], dtype_hint=tf.float32)
self._df = tensor_util.convert_nonref_to_tensor(
df, name='df', dtype=dtype)
super(Chi, self).__init__(
distribution=chi2.Chi2(df=self._df,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats),
bijector=invert_bijector.Invert(
square_bijector.Square(validate_args=validate_args)),
validate_args=validate_args,
parameters=parameters,
name=name)
@classmethod
def _params_event_ndims(cls):
return dict(df=0)
@property
def df(self):
"""Distribution parameter for degrees of freedom."""
return self._df
def _mean(self, df=None):
df = tf.convert_to_tensor(self.df if df is None else df)
return np.sqrt(2.) * tf.exp(
tf.math.lgamma(0.5 * (df + 1.)) - tf.math.lgamma(0.5 * df))
def _variance(self):
df = tf.convert_to_tensor(self.df)
return df - self._mean(df) ** 2
def _entropy(self):
df = tf.convert_to_tensor(self.df)
return (tf.math.lgamma(0.5 * df) +
0.5 * (df - np.log(2.) -
(df - 1.) * tf.math.digamma(0.5 * df)))
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if is_init != tensor_util.is_ref(self._df):
assertions.append(assert_util.assert_positive(
self._df, message='Argument `df` must be positive.'))
return assertions
def _sample_control_dependencies(self, x):
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_non_negative(
x, message='Sample must be non-negative.'))
return assertions
@kullback_leibler.RegisterKL(Chi, Chi)
def _kl_chi_chi(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Chi.
Args:
a: instance of a Chi distribution object.
b: instance of a Chi distribution object.
name: (optional) Name to use for created operations.
default is 'kl_chi_chi'.
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or 'kl_chi_chi'):
a_df = tf.convert_to_tensor(a.df)
b_df = tf.convert_to_tensor(b.df)
# Consistent with
# https://mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 118
# The paper introduces an additional scaling parameter; setting that
# parameter to 1 and simplifying yields the expression we use here.
return (0.5 * tf.math.digamma(0.5 * a_df) * (a_df - b_df) +
tf.math.lgamma(0.5 * b_df) - tf.math.lgamma(0.5 * a_df))
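# Hedged notes (editor addition, not part of the TFP source): the moments
# implemented above follow E[X] = sqrt(2) * Gamma((df + 1) / 2) / Gamma(df / 2)
# and Var[X] = df - E[X]**2, and the KL expression reduces to 0 when
# a.df == b.df, which gives a convenient sanity check:
#
#   import tensorflow_probability as tfp
#   chi = tfp.distributions.Chi(df=3.)
#   chi.mean(), chi.variance()
#   tfp.distributions.kl_divergence(chi, tfp.distributions.Chi(df=3.))  # ~0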
| 36.438272
| 80
| 0.682026
|
4a05f6945c2575980830ca1c491b7e1b2f6004b6
| 1,596
|
py
|
Python
|
rocketgram/keyboards/reply.py
|
waneroy/rocketgram
|
b84d12772a743a534878e417cd8c1f3c7d4ace1f
|
[
"MIT"
] | null | null | null |
rocketgram/keyboards/reply.py
|
waneroy/rocketgram
|
b84d12772a743a534878e417cd8c1f3c7d4ace1f
|
[
"MIT"
] | null | null | null |
rocketgram/keyboards/reply.py
|
waneroy/rocketgram
|
b84d12772a743a534878e417cd8c1f3c7d4ace1f
|
[
"MIT"
] | 1
|
2021-02-26T14:21:59.000Z
|
2021-02-26T14:21:59.000Z
|
# Copyright (C) 2015-2019 by Vd.
# This file is part of RocketGram, the modern Telegram bot framework.
# RocketGram is released under the MIT License (see LICENSE).
from .keyboard import Keyboard
class ReplyKeyboard(Keyboard):
def __init__(self, *, selective=False, one_time=False, resize=False):
super().__init__()
self._keyboard_type = 'keyboard'
self.set_selective(selective)
self.set_one_time(one_time)
self.set_resize(resize)
def set_selective(self, selective=False):
if selective:
self._options['selective'] = True
elif 'selective' in self._options:
del self._options['selective']
return self
def set_one_time(self, one_time_keyboard=False):
if one_time_keyboard:
self._options['one_time_keyboard'] = True
elif 'one_time_keyboard' in self._options:
del self._options['one_time_keyboard']
return self
def set_resize(self, resize_keyboard=False):
if resize_keyboard:
self._options['resize_keyboard'] = True
elif 'resize_keyboard' in self._options:
del self._options['resize_keyboard']
return self
def text(self, text):
btn = {'text': text}
self._buttons.append(btn)
return self
def contact(self, text):
btn = {'text': text, 'request_contact': True}
self._buttons.append(btn)
return self
def location(self, text):
btn = {'text': text, 'request_location': True}
self._buttons.append(btn)
return self
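# Hedged usage sketch (editor addition): every setter and button helper above
# returns self, so calls chain; rendering/serialization is handled by the base
# Keyboard class, which lives outside this file.
#
#   kb = ReplyKeyboard(resize=True, one_time=True)
#   kb.text('Menu').contact('Share phone').location('Send location')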
| 30.692308
| 73
| 0.635965
|
4a05f6e2e824f4c68af9351af34d058ab8702266
| 11,740
|
py
|
Python
|
exe/db_petit-saut.py
|
Tristanovsk/trios
|
d84a498f0b562d7a792a4588e4d983be885f24b9
|
[
"MIT"
] | null | null | null |
exe/db_petit-saut.py
|
Tristanovsk/trios
|
d84a498f0b562d7a792a4588e4d983be885f24b9
|
[
"MIT"
] | null | null | null |
exe/db_petit-saut.py
|
Tristanovsk/trios
|
d84a498f0b562d7a792a4588e4d983be885f24b9
|
[
"MIT"
] | null | null | null |
import glob
import re
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
from scipy.interpolate import interp1d
import pyodbc
import fiona
import pandas as pd
import pandas_access as pda
import geopandas as gpd
fiona.drvsupport.supported_drivers['kml'] = 'rw' # enable KML support which is disabled by default
fiona.drvsupport.supported_drivers['KML'] = 'rw' # enable KML support which is disabled by default
from trios.utils.sunposition import sunpos
from trios.utils import utils as u
from trios.utils.utils import plot as up
from trios.process import *
project_folder = '/DATA/projet/petit-saut/'
dirfig = os.path.join(project_folder, 'fig')
odir = os.path.join(project_folder, 'data/L2')
coordf = os.path.join(project_folder, 'data/gps_points.kml')
coords = gpd.read_file(coordf)
awrfiles = glob.glob(os.path.join(project_folder, 'data/csv/aw*.csv'))
swrfiles = glob.glob(os.path.join(project_folder, 'data/csv/Lu0*.csv'))
# get idpr
idprs = np.unique([re.split('\.', re.split(r'idpr', x)[1])[0] for x in awrfiles])
idprs = np.unique(np.append(idprs, [re.split('\.', re.split(r'idpr', x)[1])[0] for x in swrfiles]))
idpr = idprs[0]
for idpr in idprs:
name = idpr
for idx, p in enumerate(coords.Name):
if p in idpr:
break
c = coords.iloc[idx]
print(c)
lat = c.geometry.y
lon = c.geometry.x
alt = 35 # c.geometry.z.values[0]
# -----------------------------------------------
# SWR processing
# -----------------------------------------------
uswr = u.swr_data(idpr, swrfiles)
if uswr.file:
df, wl_swr = uswr.reader(lat, lon, alt)
df['sza', ''] = np.nan
for index, row in df.iterrows():
# print index
sza = sunpos(index, lat, lon, alt)[1]
df.at[index, 'sza'] = sza
swr = swr_process(df, wl_swr)
Rrs_swr = swr.call_process()
date = index.date().__str__()
mpl.rcParams.update({'font.size': 18})
fig, ax = plt.subplots( figsize=(7, 6))
up.add_curve(ax, wl_swr, Rrs_swr.transpose().mean(axis=1), Rrs_swr.transpose().std(axis=1), label='swr',
c='black')
ax.set_ylabel(r'$R_{rs}\ (sr^{-1})$')
ax.set_xlabel(r'Wavelength (nm)')
ax.set_title('ID: '+idpr+', '+date+', sza=' + str(round(sza.mean(), 2)))
fig.savefig(os.path.join(dirfig, 'trios_swr_' + date + '_idpr' + idpr + '.png'), bbox_inches='tight')
plt.close()
ofile = os.path.join(odir, 'Rrs_swr_' + date + '_idpr' + idpr + '_PSA_guyane.csv')
Rrs_stat = Rrs_swr.describe()
Rrs_stat.columns=Rrs_stat.columns.droplevel()
Rrs_stat = Rrs_stat.T
Rrs_stat.to_csv(ofile,mode='a')
#
# -----------------------------------------------
# AWR processing
# -----------------------------------------------
azi = 135
vza = 40
awr = u.awr_data(idpr, awrfiles)
if awr.Edf:
index_idx = [0]
d = u.data(index_idx)
Ed, wl_Ed = d.load_csv(awr.Edf)
Lsky, wl_Lsky = d.load_csv(awr.Lskyf)
Lt, wl_Lt = d.load_csv(awr.Ltf)
# ''' interpolate Ed and Lsky data upon Lt wavelength'''
wl = wl_Lt
Lt.columns = pd.MultiIndex.from_tuples(zip(['Lt'] * len(wl), wl), names=['param', 'wl'])
intEd = interp1d(wl_Ed, Ed.values, fill_value='extrapolate')(wl)
newEd = pd.DataFrame(index=Ed.index,
columns=pd.MultiIndex.from_tuples(zip(['Ed'] * len(wl), wl), names=['param', 'wl']),
data=intEd)
intLsky = interp1d(wl_Lsky, Lsky.values, fill_value='extrapolate')(wl)
newLsky = pd.DataFrame(index=Lsky.index, columns=pd.MultiIndex.from_tuples(zip(['Lsky'] * len(wl), wl),
names=['param', 'wl']), data=intLsky)
awr = awr_process()
ws = [2]
print(azi, vza)
Lsky = newLsky # .loc[(newLsky.index.get_level_values(1) == vza) & (newLsky.index.get_level_values(2) == azi)]
Ed = newEd # .loc[(newEd.index.get_level_values(1) == vza) & (newEd.index.get_level_values(2) == azi)]
# Lsky_idx = Lsky.index
# Ed_idx= Ed.index
# Lt_idx = Lt.index
# Lsky.reset_index(level=[1,2],inplace=True)
# Ed.reset_index(level=[1,2],inplace=True)
# Lt.reset_index(level=[1,2],inplace=True)
# merge sensor data on time
raw = pd.merge_asof(Lt, Ed, left_index=True, right_index=True, tolerance=pd.Timedelta("2 seconds"),
direction="nearest")
raw = pd.merge_asof(raw, Lsky, left_index=True, right_index=True, tolerance=pd.Timedelta("2 seconds"),
direction="nearest")
# add solar angle data and idpr
# compute solar angle (mean between fisrt and last aqcuisition time
raw['sza', ''] = np.nan
for index, row in raw.iterrows():
# print index
sza = sunpos(index, lat, lon, alt)[1]
raw.at[index, 'sza'] = sza
# ------------------
# filtering
# ------------------
ind = awr.filtering(raw.Lt, raw.Lsky, raw.Ed)
clean = raw[ind]
Lt, Lsky, Ed, sza = clean.Lt.values, clean.Lsky.values, clean.Ed.values, clean.sza.values
# -----------------------------
# data processing
# -----------------------------
Rrs99, rho99 = awr.process_wrapper(wl, clean, clean.sza, ws=ws, azi=azi)
Rrs15, rho15 = awr.process_wrapper(wl, clean, clean.sza, ws=ws, azi=azi, method='M15')
Rrs_h, rho_h = awr.process_wrapper(wl, clean, clean.sza, ws=ws, azi=azi, method='osoaa')
Rrs_opt, Rrs_opt_std = awr.process_optimization(wl, Lt, Lsky, Ed, sza, azi=azi)
wl = Rrs99.T.index.get_level_values(1)
date = Rrs99.index.get_level_values(0).date[0].__str__()
# ------------------
# plotting
# ------------------
Ltm = Lt.mean(axis=0)
Edm = Ed.mean(axis=0)
mpl.rcParams.update({'font.size': 18})
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(20, 12))
fig.subplots_adjust(left=0.1, right=0.9, hspace=.5, wspace=0.45)
# ---- Ed
ax = axs[0, 0]
up.add_curve(ax, wl, Ed.mean(axis=0),
label=r'$L_{sky}$', c='red') # just to put the two labels
up.add_curve(ax, wl, Ed.mean(axis=0), Ed.std(axis=0),
label=r'$E_s$', c='black')
ax.set_ylabel(r'$E_{d}(0^{+})$')
# ---- Lsky
ax2 = ax.twinx()
up.add_curve(ax2, wl, Lsky.mean(axis=0), Lsky.std(axis=0),
label=r'$L_{sky}$', c='red')
ax2.set_ylabel(r'$L_{sky}$', color='r')
ax2.tick_params('y', colors='r')
ax.set_xlabel(r'Wavelength (nm)')
ax.legend(loc='best', frameon=False)
# ---- Lt vs Lsurf
ax = axs[0, 1]
up.add_curve(ax, wl, Lt.mean(axis=0), Lt.std(axis=0),
label=r'$L_t$', c='black')
up.add_curve(ax, wl, Lsky.mean(axis=0) * rho15, Lsky.std(axis=0) * rho15,
label='M2015 (' + str(round(rho15, 4)) + ')', c='violet')
up.add_curve(ax, wl, Lsky.mean(axis=0) * rho99, Lsky.std(axis=0) * rho99, c='orange',
label='M1999(' + str(round(rho99, 4)) + ')')
up.add_curve(ax, wl, Lsky.mean(axis=0) * rho_h, Lsky.std(axis=0) * rho_h, c='green',
label='h(' + str(round(rho_h.mean(), 4)) + ')')
ax.set_ylabel(r'$L_t\ or L_{surf}$')
ax.set_xlabel(r'Wavelength (nm)')
# ---- Proportion o(Lt - Lsurf ) /Lt
ax = axs[0, 2]
up.add_curve(ax, wl, Lsky.mean(axis=0) * rho15 / Ltm, Lsky.std(axis=0) * rho15,
label='M2015 (' + str(round(rho15, 4)) + ')', c='violet')
up.add_curve(ax, wl, Lsky.mean(axis=0) * rho99 / Ltm, Lsky.std(axis=0) * rho99, c='orange',
label='M1999(' + str(round(rho99, 4)) + ')')
up.add_curve(ax, wl, Lsky.mean(axis=0) * rho_h / Ltm, Lsky.std(axis=0) * rho_h, c='green',
label='h(' + str(round(rho_h.mean(), 4)) + ')')
ax.set_ylabel(r'$L_{surf}/L_t$')
ax.set_xlabel(r'Wavelength (nm)')
# ---- Lw
ax = axs[1, 0]
up.add_curve(ax, wl, Rrs15.mean(axis=0) * Edm, Rrs15.std(axis=0) * Edm,
label='M2015 (' + str(round(rho15, 4)) + ')', c='violet')
up.add_curve(ax, wl, Rrs99.mean(axis=0) * Edm, Rrs99.std(axis=0) * Edm, c='orange',
label='M1999(' + str(round(rho99, 4)) + ')')
up.add_curve(ax, wl, Rrs_h.mean(axis=0) * Edm, Rrs_h.std(axis=0) * Edm, c='green',
label='h(' + str(round(rho_h.mean(), 4)) + ')')
up.add_curve(ax, wl, Rrs_opt * Edm, Rrs_opt_std * Edm, c='blue',
label='Optimization')
ax.set_ylabel(r'$L_{w}\ (sr^{-1})$')
ax.set_xlabel(r'Wavelength (nm)')
# ---- Rrs
ax = axs[1, 1]
up.add_curve(ax, wl_swr, Rrs_swr.transpose().mean(axis=1), Rrs_swr.transpose().std(axis=1), label='swr',
c='black')
up.add_curve(ax, wl, Rrs15.transpose().mean(axis=1), Rrs15.transpose().std(axis=1),
label='M2015 (' + str(round(rho15, 4)) + ')', c='violet')
up.add_curve(ax, wl, Rrs99.transpose().mean(axis=1), Rrs99.transpose().std(axis=1), c='orange',
label='M1999(' + str(round(rho99, 4)) + ')')
up.add_curve(ax, wl, Rrs_h.transpose().mean(axis=1), Rrs_h.transpose().std(axis=1), c='green',
label='h(' + str(round(rho_h.mean(), 4)) + ')')
up.add_curve(ax, wl, Rrs_opt, Rrs_opt_std, c='blue',
label='Optimization')
ax.set_ylabel(r'$R_{rs}\ (sr^{-1})$')
ax.set_xlabel(r'Wavelength (nm)')
ax.set_title('azi=' + str(azi) + ', vza=' + str(vza) + ', sza=' + str(round(sza.mean(), 2)))
# ---- delta Rrs
ax = axs[1, 2]
Rrs_swr_ = interp1d(wl_swr, Rrs_swr.transpose().mean(axis=1), fill_value='extrapolate')(wl)
Rrs_swr_[wl > 850] = np.nan
up.add_curve(ax, wl, (Rrs15.mean(axis=0) - Rrs_swr_) / Rrs_swr_,
label='M2015 (' + str(round(rho15, 4)) + ')', c='violet')
up.add_curve(ax, wl, (Rrs99.mean(axis=0) - Rrs_swr_) / Rrs_swr_, c='orange',
label='M1999(' + str(round(rho99, 4)) + ')')
up.add_curve(ax, wl, (Rrs_h.mean(axis=0) - Rrs_swr_) / Rrs_swr_, c='green',
label='h(' + str(round(rho_h.mean(), 4)) + ')')
up.add_curve(ax, wl, (Rrs_opt - Rrs_swr_) / Rrs_swr_, c='blue',
label='Optimization')
ax.set_ylabel(r'$\Delta^{rel} R_{rs} $')
ax.set_xlabel(r'Wavelength (nm)')
ax.legend(loc='best', frameon=False)
fig.suptitle('trios_awr ' + name + ' idpr' + idpr, fontsize=16)
fig.savefig(os.path.join(dirfig, 'trios_awr_' + name + '_idpr' + idpr + '.png'))
plt.close()
#
#
#
# date = c['Date_prel'].dt.strftime('%Y%m%d')
#
# dbf = '/DATA/projet/petit-saut/data/dataTrios_Guyane_20190523.mdb'
# #data = pda.read_table(dbf,'tblData')
#
#
#
#
# driver='DRIVER={Microsoft Access Driver (*.mdb, *.accdb)}'
# driver='DRIVER={'+pyodbc.drivers()[2]+'}'
# # connect to bd TRIOS
# odbc = pyodbc.connect(driver+';DBQ=' + dbf)
#
# query = 'SELECT * FROM tblData WHERE ((tblData.IDDataType LIKE \'SPECTRUM\')) AND ' \
#         '((tblData.IDDataTypeSub1 LIKE \'CALIBRATED\') OR (tblData.IDDataTypeSub1 LIKE \'CALCULATED\'))'
#
# ramses_df = pd.read_sql(query, odbc)
#
#
| 41.338028
| 121
| 0.538671
|
4a05f73f6b7ace0e28c57cbe3b21760a41918c8b
| 1,258
|
py
|
Python
|
app/models.py
|
UMCUGenetics/illumina-runinfo
|
48c451fe034670c9b15289f6f94226757147941d
|
[
"MIT"
] | 1
|
2018-06-18T16:23:08.000Z
|
2018-06-18T16:23:08.000Z
|
app/models.py
|
UMCUGenetics/illumina-runinfo
|
48c451fe034670c9b15289f6f94226757147941d
|
[
"MIT"
] | null | null | null |
app/models.py
|
UMCUGenetics/illumina-runinfo
|
48c451fe034670c9b15289f6f94226757147941d
|
[
"MIT"
] | 1
|
2021-02-10T13:45:45.000Z
|
2021-02-10T13:45:45.000Z
|
from app import db
class RunInfo(db.Model):
## Required
id = db.Column(db.Integer, primary_key=True)
run_id = db.Column(db.String(50), nullable=False, unique=True)
experiment_name = db.Column(db.String(100), nullable=False)
run_start_date = db.Column(db.Date, nullable=False)
barcode = db.Column(db.String(50), nullable=False)
run_mode = db.Column(db.String(50))
paired_end = db.Column(db.Boolean)
read_1 = db.Column(db.Integer)
read_2 = db.Column(db.Integer)
index_read_1 = db.Column(db.Integer)
index_read_2 = db.Column(db.Integer)
pe = db.Column(db.String(50))
platform_id = db.Column(db.Integer, db.ForeignKey('platform.id'), nullable=False)
def __repr__(self):
return "{} \t {} \t {}".format(self.run_id, self.experiment_name, self.run_start_date)
class Platform(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), unique=True)
application_name = db.Column(db.String(80), unique=True)
runs = db.relationship('RunInfo', backref='platform', lazy='dynamic')
def __init__(self, name, application_name):
self.name = name
self.application_name = application_name
def __repr__(self):
return self.name
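# Hedged usage sketch (editor addition): `db` is the Flask-SQLAlchemy handle
# imported above; the session/engine setup lives in the app package. The
# platform name below is a placeholder value, not project data.
#
#   miseq = Platform('MiSeq', 'MiSeq Control Software')
#   db.session.add(miseq)
#   db.session.commit()
#   miseq.runs.order_by(RunInfo.run_start_date.desc()).all()  # lazy='dynamic' query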
| 34.944444
| 94
| 0.682035
|
4a05f860cc7ff28ad07d992d289994698550398a
| 9,879
|
py
|
Python
|
tg_bot/functions.py
|
EeOneDown/spbu4u
|
2ad01088fb167c80c53b757a0247fc5cde34c20f
|
[
"Apache-2.0"
] | 30
|
2017-09-14T20:25:43.000Z
|
2022-03-12T09:55:35.000Z
|
tg_bot/functions.py
|
EeOneDown/spbu4u
|
2ad01088fb167c80c53b757a0247fc5cde34c20f
|
[
"Apache-2.0"
] | 59
|
2018-01-12T18:29:24.000Z
|
2019-03-08T21:08:40.000Z
|
tg_bot/functions.py
|
EeOneDown/spbu4u
|
2ad01088fb167c80c53b757a0247fc5cde34c20f
|
[
"Apache-2.0"
] | 8
|
2017-12-01T18:36:04.000Z
|
2020-11-22T00:36:15.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from datetime import datetime, date, timedelta
import pymysql
import spbu
from app.constants import emoji, subject_short_types
server_timedelta = timedelta(hours=0)
def parse_event_time(event):
return "{0} {1:0>2}:{2:0>2}{3}{4:0>2}:{5:0>2}".format(
emoji["clock"],
datetime_from_string(event["Start"]).time().hour,
datetime_from_string(event["Start"]).time().minute,
emoji["en_dash"],
datetime_from_string(event["End"]).time().hour,
datetime_from_string(event["End"]).time().minute
)
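# Editor illustration (hedged): for an event running 09:30-11:05 the format
# string above yields "<clock emoji> 09:30<en dash>11:05"; the actual clock and
# dash symbols come from app.constants.emoji, which is outside this file.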
def parse_event_subject(event):
answer = ""
subject_name = ", ".join(event["Subject"].split(", ")[:-1])
subject_type = event["Subject"].split(", ")[-1]
stripped_subject_type = " ".join(subject_type.split()[:2])
if stripped_subject_type in subject_short_types.keys():
answer += subject_short_types[stripped_subject_type] + " - "
else:
answer += subject_type.upper() + " - "
answer += subject_name
return answer
def parse_event_location(location, full_place=True, have_chosen_educator=False,
chosen_educator=None):
answer = ""
if location["IsEmpty"]:
return answer
if have_chosen_educator and not chosen_educator.issuperset(
{edu["Item2"].split(", ")[0] for edu in location["EducatorIds"]}
):
return answer
if full_place:
location_name = location["DisplayName"].strip(", ").strip()
else:
location_name = location["DisplayName"].split(", ")[-1].strip()
answer += location_name
if location["HasEducators"]:
educators = [educator["Item2"].split(", ")[0] for educator in
location["EducatorIds"]]
if len(educators):
answer += " <i>({0})</i>".format("; ".join(educators))
return answer
def insert_skip(event_name, types, event_day, event_time,
educators, user_id, is_choose_educator=False):
sql_con = get_connection()
cursor = sql_con.cursor()
try:
cursor.execute("""INSERT INTO lessons
(name, types, day, time, educators)
VALUES (%s, %s, %s, %s, %s)""",
(event_name, types, event_day, event_time, educators))
sql_con.commit()
except pymysql.IntegrityError:
sql_con.rollback()
finally:
cursor.execute("""SELECT id
FROM lessons
WHERE name = %s
AND types = %s
AND day = %s
AND time = %s
AND educators = %s""",
(event_name, types, event_day, event_time, educators))
lesson_id = cursor.fetchone()[0]
try:
if is_choose_educator:
cursor.execute("""INSERT INTO user_educators VALUES (%s, %s)""",
(user_id, lesson_id))
else:
cursor.execute("""INSERT INTO skips VALUES (%s, %s)""",
(lesson_id, user_id))
sql_con.commit()
except pymysql.IntegrityError:
sql_con.rollback()
finally:
cursor.close()
sql_con.close()
def get_hide_lessons_data(user_id, week_day=None,
is_educator=False):
sql_con = get_connection()
cursor = sql_con.cursor()
sql_req = """SELECT
s.lesson_id,
l.name,
l.types,
l.day,
l.time,
l.educators
"""
if is_educator:
sql_req += """FROM user_educators AS s
JOIN lessons AS l
ON l.id = s.lesson_id
"""
else:
sql_req += """FROM skips AS s
JOIN lessons AS l
ON l.id = s.lesson_id
"""
sql_req += """WHERE user_id = %s"""
req_param = (user_id,)
if week_day:
sql_req += " AND (day = 'all' OR day = %s)"
req_param += (week_day, )
cursor.execute(sql_req, req_param)
data = cursor.fetchall()
cursor.close()
sql_con.close()
return data
def get_connection():
    # The SQL in this module uses pymysql-style "%s" placeholders and the
    # error handling catches pymysql.IntegrityError, so the connection has to
    # be a pymysql one rather than sqlite3. The environment variable names and
    # defaults below are editor assumptions; the real deployment settings live
    # outside this file.
    from os import environ
    return pymysql.connect(host=environ.get("DB_HOST", "localhost"),
                           user=environ.get("DB_USER", "root"),
                           password=environ.get("DB_PASSWORD", ""),
                           db=environ.get("DB_NAME", "spbu4u"),
                           charset="utf8mb4")
def date_from_iso(iso):
return datetime.strptime("%d%02d%d" % (iso[0], iso[1], iso[2]),
"%Y%W%w").date()
def get_current_monday_date():
iso_day_date = list((date.today() + server_timedelta).isocalendar())
if iso_day_date[2] == 7:
iso_day_date[1] += 1
iso_day_date[2] = 1
monday_date = date_from_iso(iso_day_date)
return monday_date
def delete_symbols(json_obj):
return json.loads(
json.dumps(json_obj).replace("<", "").replace(">", "").replace("&", "")
)
def get_chosen_educators(user_id):
sql_con = get_connection()
cursor = sql_con.cursor()
data = {}
sql_req = """SELECT
lessons.name,
lessons.educators
FROM user_educators
JOIN lessons
ON user_educators.lesson_id = lessons.id
WHERE user_educators.user_id = %s"""
cursor.execute(sql_req, (user_id,))
for row in cursor.fetchall():
if row[0] in data.keys():
data[row[0]].add(row[1])
else:
data[row[0]] = {row[1]}
return data
def datetime_from_string(dt_string):
return datetime.strptime(dt_string, "%Y-%m-%dT%H:%M:%S")
def is_event_in_skips(event, skips, week_day_string):
event_educators = []
for educator in event["EducatorIds"]:
event_educators.append(educator["Item2"].split(", ")[0])
event_educators = set(event_educators)
for skip_lesson in skips:
skip_educators = set(skip_lesson[5].split("; "))
stripped_type = " ".join(event["Subject"].split(", ")[-1].split()[:2])
if skip_lesson[1] == ", ".join(event["Subject"].split(", ")[:-1]) and \
(skip_lesson[2] == "all" or
stripped_type in skip_lesson[2].split("; ")) and \
(skip_lesson[3] == "all" or
skip_lesson[3] == week_day_string) and \
(skip_lesson[4] == "all" or
skip_lesson[4] == parse_event_time(event)) and \
(skip_lesson[5] == "all" or
event_educators.issubset(skip_educators)):
return True
return False
def get_json_week_data(user_id, next_week=False, for_day=None):
sql_con = get_connection()
cursor = sql_con.cursor()
cursor.execute("""SELECT group_id
FROM user_data
WHERE id= %s""", (user_id,))
group_id = cursor.fetchone()[0]
cursor.close()
sql_con.close()
if for_day:
monday_date = for_day
elif next_week:
monday_date = get_current_monday_date()
monday_date += timedelta(days=7)
else:
monday_date = get_current_monday_date()
json_week_data = spbu.get_group_events(group_id=group_id,
from_date=monday_date)
return delete_symbols(json_week_data)
def get_json_day_data(user_id, day_date, json_week_data=None, next_week=False):
if json_week_data is None:
json_week_data = get_json_week_data(user_id, next_week)
for day_info in json_week_data["Days"]:
if datetime_from_string(day_info["Day"]).date() == day_date:
return day_info
return None
def get_lessons_with_educators(user_id, day_date):
json_day = get_json_day_data(user_id, day_date)
answer = ""
day_study_events = json_day["DayStudyEvents"]
count = 0
for event in day_study_events:
event_text = ""
if (event["IsCancelled"]
or len([loc for loc in event["EventLocations"]
if loc["HasEducators"]]) < 2):
continue
subject_name = ", ".join(event["Subject"].split(", ")[:-1])
event_text += "{0}</b>".format(subject_name)
if is_event_in_skips(event, get_hide_lessons_data(
user_id, week_day=json_day["DayString"].split(", ")[0]),
json_day["DayString"].split(", ")[0]):
event_text += " {0}".format(emoji["cross_mark"])
event_text += "\n"
chosen_educators = get_chosen_educators(user_id)
have_chosen_educator = False
if subject_name in chosen_educators.keys() \
and any(
ch_edu in [
edu["Item2"].split(", ")[0] for edu in event["EducatorIds"]
] for ch_edu in chosen_educators[subject_name]
):
have_chosen_educator = True
for location in event["EventLocations"]:
event_text += location["DisplayName"].strip(", ")
educators = {educator["Item2"].split(", ")[0] for educator in
location["EducatorIds"]}
if len(educators):
event_text += " <i>({0})</i>".format("; ".join(educators))
if have_chosen_educator and educators.issubset(chosen_educators[
subject_name]):
event_text += " {0}".format(emoji["heavy_check_mark"])
event_text += "\n"
if event_text not in answer:
count += 1
answer += "<b>{0}. {1}\n".format(count, event_text)
if answer == "":
data = {"is_empty": True, "answer": "Подходящих занятий нет",
"date": json_day["DayString"].capitalize()}
else:
data = {"is_empty": False, "answer": answer.strip("\n\n"),
"date": json_day["DayString"].capitalize()}
return data
| 33.716724
| 79
| 0.551675
|
4a05f8dc94cf74888fc19e7d9d1ea25746586951
| 1,933
|
py
|
Python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2018_06_01/models/__init__.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2018_06_01/models/__init__.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2018_06_01/models/__init__.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Location
from ._models_py3 import LocationListResult
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import Subscription
from ._models_py3 import SubscriptionListResult
from ._models_py3 import SubscriptionPolicies
from ._models_py3 import TenantIdDescription
from ._models_py3 import TenantListResult
except (SyntaxError, ImportError):
from ._models import Location # type: ignore
from ._models import LocationListResult # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import Subscription # type: ignore
from ._models import SubscriptionListResult # type: ignore
from ._models import SubscriptionPolicies # type: ignore
from ._models import TenantIdDescription # type: ignore
from ._models import TenantListResult # type: ignore
from ._subscription_client_enums import (
SpendingLimit,
SubscriptionState,
)
__all__ = [
'Location',
'LocationListResult',
'Operation',
'OperationDisplay',
'OperationListResult',
'Subscription',
'SubscriptionListResult',
'SubscriptionPolicies',
'TenantIdDescription',
'TenantListResult',
'SpendingLimit',
'SubscriptionState',
]
| 37.901961
| 94
| 0.691671
|
4a05fa517e7c5b3e5eb0ae582b36d2c7b4d9e1a1
| 855
|
py
|
Python
|
tests/test_data.py
|
mikemhenry/pyscreener
|
9894eaac59a8648c55e834e061c31daa85fb74bd
|
[
"MIT"
] | 34
|
2021-01-08T00:32:01.000Z
|
2022-02-20T20:02:55.000Z
|
tests/test_data.py
|
mikemhenry/pyscreener
|
9894eaac59a8648c55e834e061c31daa85fb74bd
|
[
"MIT"
] | 24
|
2021-01-29T18:28:45.000Z
|
2022-03-22T21:48:01.000Z
|
tests/test_data.py
|
mikemhenry/pyscreener
|
9894eaac59a8648c55e834e061c31daa85fb74bd
|
[
"MIT"
] | 13
|
2021-01-09T11:07:03.000Z
|
2022-02-10T23:08:11.000Z
|
import random
import uuid
import pytest
from pyscreener.docking import CalculationData, Result
from pyscreener.exceptions import InvalidResultError, NotSimulatedError
@pytest.fixture(
params=["CCCCCCC", "C1CCC1", "CC(=O)CC", "CCCCCCCC", "CCCC1CC1"]
)
def smi(request):
return request.param
def test_notsimulated(smi):
data = CalculationData(smi, None, None, None, None)
with pytest.raises(NotSimulatedError):
data.score
def test_invalid_result(smi):
data = CalculationData(smi, None, None, None, None)
data.result = {"score": random.random()}
with pytest.raises(InvalidResultError):
data.score
def test_score(smi):
data = CalculationData(smi, None, None, None, None)
score = random.random()
data.result = Result(smi, 'ligand', str(uuid.uuid4()), score)
assert data.result.score == score
| 25.909091
| 71
| 0.708772
|
4a05fb3796e881a1620c454a358c17b5b4abb745
| 3,217
|
py
|
Python
|
backend/migrations/0002_auto_20190814_1719.py
|
animeshk08/voting-ethereum
|
60c8e97a7bd5124cda295237d7b6919b3bb1f9b7
|
[
"MIT"
] | null | null | null |
backend/migrations/0002_auto_20190814_1719.py
|
animeshk08/voting-ethereum
|
60c8e97a7bd5124cda295237d7b6919b3bb1f9b7
|
[
"MIT"
] | 6
|
2021-03-19T11:44:10.000Z
|
2021-05-11T19:18:34.000Z
|
backend/migrations/0002_auto_20190814_1719.py
|
animeshk08/voting-ethereum
|
60c8e97a7bd5124cda295237d7b6919b3bb1f9b7
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.4 on 2019-08-14 17:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('backend', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AadharDetail',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('mobile_num', models.CharField(max_length=20)),
('aadhar_num', models.CharField(max_length=20)),
('finger_print', models.TextField(max_length=255)),
('age', models.IntegerField()),
('gender', models.CharField(max_length=20)),
('address', models.TextField(max_length=255)),
('pincode', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Constituency',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Election',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('start_date', models.DateField()),
('end_date', models.DateField()),
],
),
migrations.CreateModel(
name='ElectionConstituency',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('constituency_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backend.Constituency')),
('election_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backend.Election')),
],
),
migrations.CreateModel(
name='Party',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='PartyCandidate',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('aadhar_detail_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backend.AadharDetail')),
('constituency_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backend.Constituency')),
('election_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backend.Election')),
('party_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backend.Party')),
],
),
migrations.DeleteModel(
name='AadharData',
),
migrations.AddField(
model_name='aadhardetail',
name='constituency_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backend.Constituency'),
),
]
| 41.24359
| 128
| 0.565434
|
4a05fbf1070daad181d215f558e8cb4f14cf7efa
| 23,335
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_azure_firewalls_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_azure_firewalls_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_azure_firewalls_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallsOperations(object):
"""AzureFirewallsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
azure_firewall_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
azure_firewall_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
azure_firewall_name=azure_firewall_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
azure_firewall_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AzureFirewall"
"""Gets the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureFirewall, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_04_01.models.AzureFirewall
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
azure_firewall_name, # type: str
parameters, # type: "_models.AzureFirewall"
**kwargs # type: Any
):
# type: (...) -> "_models.AzureFirewall"
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AzureFirewall')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
azure_firewall_name, # type: str
parameters, # type: "_models.AzureFirewall"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.AzureFirewall"]
"""Creates or updates the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:param parameters: Parameters supplied to the create or update Azure Firewall operation.
:type parameters: ~azure.mgmt.network.v2018_04_01.models.AzureFirewall
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either AzureFirewall or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_04_01.models.AzureFirewall]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
azure_firewall_name=azure_firewall_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AzureFirewallListResult"]
"""Lists all Azure Firewalls in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_04_01.models.AzureFirewallListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AzureFirewallListResult"]
"""Gets all the Azure Firewalls in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_04_01.models.AzureFirewallListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
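# Editor's note: a minimal, hypothetical usage sketch (not part of the generated
# module) showing how this operations group is usually reached through the
# versioned NetworkManagementClient; the credential setup, subscription id and
# resource names below are assumptions.
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#   from azure.mgmt.network.v2018_04_01.models import AzureFirewall
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.azure_firewalls.begin_create_or_update(
#       "my-rg", "my-firewall", AzureFirewall(location="westeurope"))
#   firewall = poller.result()                 # block until the LRO finishes
#   for fw in client.azure_firewalls.list_all():
#       print(fw.name)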
| 48.21281
| 197
| 0.659567
|
4a05fcc1af80d1bf00710e4fa64fabcf5c7f6780
| 9,567
|
py
|
Python
|
regression/efas/boxplot_reg.py
|
b8raoult/magics
|
eb2c86ec6e392e89c90044128dc671f22283d6ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-12-07T23:10:50.000Z
|
2022-02-19T03:01:49.000Z
|
regression/efas/boxplot_reg.py
|
b8raoult/magics
|
eb2c86ec6e392e89c90044128dc671f22283d6ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 59
|
2019-01-04T15:43:30.000Z
|
2022-03-31T09:48:15.000Z
|
regression/efas/boxplot_reg.py
|
b8raoult/magics
|
eb2c86ec6e392e89c90044128dc671f22283d6ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2019-01-07T14:36:33.000Z
|
2021-09-06T14:48:36.000Z
|
# (C) Copyright 1996-2016 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
from csv import reader
from datetime import datetime
from Magics.macro import *
#read input file
f= file('boxplot.data','rb')
r= reader(f,delimiter=';')
rows= []
for row in r:
#skip useless lines
if row[0][0]=='X': continue
if row[0][0]=='-': continue
if row[0][0]==' ': continue
rows+= [row]
f.close()
#limit values
max_date= '0'
min_date= '9'
max_y= -10000000000
min_y= 10000000000
#dictionary structure
data= {}
data['WB_FOR']= {}
data['WB_FOR']['DATE']= []
data['WB_FOR']['DWD']= []
data['WB_FOR']['EUD']= []
data['WB_OBS']= {}
data['WB_OBS']['DATE']= []
data['WB_OBS']['OBS']= []
data['FOR_DETETMINISTIC']= {}
data['FOR_DETETMINISTIC']['DATE']= []
data['FOR_DETETMINISTIC']['DWD']= []
data['FOR_DETETMINISTIC']['EUD']= []
data['FOR_PROBABILISTIC']= {}
data['FOR_PROBABILISTIC']['DATE']= []
data['FOR_PROBABILISTIC']['QMIN']= []
data['FOR_PROBABILISTIC']['Q25']= []
data['FOR_PROBABILISTIC']['QMED']= []
data['FOR_PROBABILISTIC']['Q75']= []
data['FOR_PROBABILISTIC']['QMAX']= []
#fill the dictionary
row_type= ''
for row in rows:
f1= row[0]
if f1.find('WB_FOR')==0:
row_type= 'FOR'
continue
if f1.find('WB_OBS')==0:
row_type= 'OBS'
continue
if f1.find('FOR_DETETMINISTIC')==0:
row_type= 'DET'
continue
if f1.find('FOR_PROBABILISTIC')==0:
row_type= 'PRO'
continue
if f1.find('THlow')==0:
data['THlow']= float(row[0].split('=')[1])
data['THmedium']= float(row[1].split('=')[1])
data['THHigh']= float(row[2].split('=')[1])
data['THextreme']= float(row[3].split('=')[1])
continue
#convert numbers format
row= [row[0]]+[float(ele) for ele in row[1:]]
if row_type=='FOR':
date= datetime.strptime(row[0],'%m/%d/%Y %I:%M:%S %p')
row[0]= datetime.strftime(date,'%Y-%m-%d %H:%M:%S')
data['WB_FOR']['DATE']+= [row[0]]
data['WB_FOR']['DWD']+= [row[1]]
data['WB_FOR']['EUD']+= [row[2]]
if row_type=='OBS':
date= datetime.strptime(row[0],'%m/%d/%Y %I:%M:%S %p')
row[0]= datetime.strftime(date,'%Y-%m-%d %H:%M:%S')
data['WB_OBS']['DATE']+= [row[0]]
data['WB_OBS']['OBS']+= [row[1]]
if row_type=='DET':
date= datetime.strptime(row[0],'%d/%m/%Y %I:%M %p')
row[0]= datetime.strftime(date,'%Y-%m-%d %H:%M:00')
data['FOR_DETETMINISTIC']['DATE']+= [row[0]]
data['FOR_DETETMINISTIC']['DWD']+= [row[1]]
data['FOR_DETETMINISTIC']['EUD']+= [row[2]]
if row_type=='PRO':
date= datetime.strptime(row[0],'%m/%d/%Y %I:%M:%S %p')
row[0]= datetime.strftime(date,'%Y-%m-%d %H:%M:%S')
data['FOR_PROBABILISTIC']['DATE']+= [row[0]]
data['FOR_PROBABILISTIC']['QMIN']+= [row[1]]
data['FOR_PROBABILISTIC']['Q25']+= [row[2]]
data['FOR_PROBABILISTIC']['QMED']+= [row[3]]
data['FOR_PROBABILISTIC']['Q75']+= [row[4]]
data['FOR_PROBABILISTIC']['QMAX']+= [row[5]]
#calculate range of dates
max_date= max(max_date,row[0])
min_date= min(min_date,row[0])
#do not use missing values when calculating extreme values
values= [ele for ele in row[1:] if ele!=1.7e+308]
max_y= max([max_y]+values)
min_y= min([min_y]+values)
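# Editor's note: 1.7e+308 is used above as the missing-value sentinel, so such
# entries are skipped when tracking the y extremes that set the plot limits below.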
#################################################################
#test
for key in data:
obj= data[key]
if type(obj)==type({}):
for k2 in obj: print key,k2,obj[k2]
else:
print key, obj
print 'limits values:'
print 'x:',[min_date,max_date]
print 'y:',[min_y,max_y]
#################################################################
output = output(output_formats=['png'],
output_name_first_page_number='off',
output_name="boxplot_reg")
min = round(min_y - ((max_y-min_y)*0.1))
max = round(max_y + ((max_y-min_y)*0.1))
# Setting the cartesian view
projection = mmap(
subpage_y_position=2.,
subpage_map_projection='cartesian',
subpage_x_axis_type='date',
subpage_y_axis_type='regular',
subpage_x_date_min=min_date,
subpage_x_date_max=max_date,
subpage_y_min=min,
subpage_y_max=max,
)
# Vertical axis
vertical = maxis(
axis_orientation='vertical',
axis_type='regular',
axis_tick_label_height=0.40,
axis_tick_label_colour='navy',
axis_grid='on',
axis_grid_colour='grey',
axis_grid_thickness=1,
axis_grid_line_style='dot',
axis_title='on',
axis_title_text='Discharge (m3/s)',
axis_title_font_style='bold',
)
# Horizontal axis
horizontal = maxis(
axis_orientation='horizontal',
axis_type='date',
axis_grid='on',
axis_days_label_height=0.40,
axis_months_label_height=0.40,
axis_years_label_height=0.50,
axis_grid_colour='grey',
axis_grid_thickness=1,
axis_grid_line_style='dot',
axis_title='on',
axis_title_text='Date (days)',
axis_title_font_style='bold',
)
# dwd black curve
dwd_input = minput(input_x_type='date',
input_date_x_values=data['FOR_DETETMINISTIC']['DATE'],
input_y_values=data['FOR_DETETMINISTIC']['DWD'])
dwd_graph = mgraph(graph_line_colour='black',
graph_line_thickness=4,
legend='on',
legend_user_text="DWD")
# eud red curve
eud_input = minput(input_x_type='date',
input_date_x_values=data['FOR_DETETMINISTIC']['DATE'],
input_y_values=data['FOR_DETETMINISTIC']['EUD'])
eud_graph = mgraph(graph_line_colour='red',
graph_line_thickness=4,
legend='on',
legend_user_text="ECMWF")
# box plot
boxplot = mboxplot(boxplot_date_positions=data['FOR_PROBABILISTIC']['DATE'],
boxplot_minimum_values = [x-50. for x in data['FOR_PROBABILISTIC']['QMIN']],
boxplot_maximum_values = [x +50. for x in data['FOR_PROBABILISTIC']['QMAX']],
boxplot_box_upper_values = [x +50. for x in data['FOR_PROBABILISTIC']['Q75']],
boxplot_box_lower_values = data['FOR_PROBABILISTIC']['Q25'],
boxplot_median_values = data['FOR_PROBABILISTIC']['QMED'],
boxplot_box_colour = "rgb(0.65,0.58,0.92)")
# wb_obs
obs_input = minput(input_x_type='date',
input_date_x_values=data['WB_OBS']['DATE'],
input_y_values=data['WB_OBS']['OBS'])
# obs plotting
obs_symb = msymb(
symbol_type='marker',
symbol_colour='black',
symbol_height=0.5,
symbol_marker_index=15,
legend='on',
legend_user_text="WB_obs"
)
# wb_for for eud
eud_for_input = minput(input_x_type='date',
input_date_x_values=data['WB_FOR']['DATE'],
input_y_values=data['WB_FOR']['EUD'])
# obs plotting
eud_symb = msymb(
symbol_type='marker',
symbol_colour='black',
symbol_height=0.3,
symbol_connect_line='false',
symbol_marker_index=15,
legend='on',
legend_user_text="WB_ECMWF"
)
# wb_for for dwd
dwd_for_input = minput(input_x_type='date',
input_date_x_values=data['WB_FOR']['DATE'],
input_y_values=data['WB_FOR']['DWD'])
# obs plotting
dwd_symb = msymb(
symbol_type='marker',
symbol_colour='red',
symbol_height=0.3,
symbol_marker_index=15,
legend='on',
legend_user_text="WB_DWD",
)
# wb_for for dwd
dates = [min_date, max_date]
print data["THlow"]
lows = [data["THlow"], data["THlow"]]
mediums = [data["THmedium"], data["THmedium"]]
highs = [data["THHigh"], data["THHigh"]]
extremes = [data["THextreme"], data["THextreme"]]
green ='rgb(0.78,0.95,0.17)'
yellow ='rgb(0.98,0.96,0.02)'
red ='rgb(0.93,0.34,0.35)'
purple ='rgb(0.79,0.35,0.95)'
low = mgraph(
x_date_values = dates,
x2_date_values = dates,
y_values = lows,
y2_values = mediums,
graph_line_colour=green,
graph_shade_colour=green,
graph_line_thickness=4,
graph_type='area',
graph_shade='on',
legend='on',
legend_user_text="Low%.2f"% (data["THlow"]))
medium = mgraph(
x_date_values = dates,
x2_date_values = dates,
y_values = mediums,
y2_values = highs,
graph_line_colour=yellow,
graph_shade_colour=yellow,
graph_line_thickness=4,
graph_type='area',
graph_shade='on',
legend='on',
legend_user_text="Med-%.2f"% (data["THmedium"]))
high = mgraph(
x_date_values = dates,
x2_date_values = dates,
y_values = highs,
y2_values = extremes,
graph_line_colour=red,
graph_shade_colour=red,
graph_line_thickness=4,
graph_type='area',
graph_shade='on',
legend='on',
legend_user_text="High-%.2f" % (data["THHigh"]))
extreme = mgraph(
x_date_values = dates,
x2_date_values = dates,
y_values = extremes,
y2_values = [max, max],
graph_line_colour="yellow",
graph_shade_colour=purple,
graph_line_thickness=6,
graph_type='area',
graph_shade='on',
legend='on',
legend_user_text="Sev-%.2f" % (data["THextreme"]))
plot(output, projection, vertical, horizontal,
low , medium, high, extreme,
eud_input, eud_graph,
dwd_input, dwd_graph,
boxplot,
eud_for_input, eud_symb,
dwd_for_input, dwd_symb,
obs_input, obs_symb
)
| 28.72973
| 86
| 0.618062
|
4a05fdb4d1902a1a453a65b732fcef322fbc5c00
| 5,844
|
py
|
Python
|
planemo/lint.py
|
TMiguelT/planemo
|
deccc48cb15ea9e670f1dbbc0d6dd1e96fe96418
|
[
"CC-BY-3.0"
] | null | null | null |
planemo/lint.py
|
TMiguelT/planemo
|
deccc48cb15ea9e670f1dbbc0d6dd1e96fe96418
|
[
"CC-BY-3.0"
] | null | null | null |
planemo/lint.py
|
TMiguelT/planemo
|
deccc48cb15ea9e670f1dbbc0d6dd1e96fe96418
|
[
"CC-BY-3.0"
] | null | null | null |
"""Utilities to help linting various targets."""
from __future__ import absolute_import
import os
import requests
from galaxy.tool_util.lint import LintContext
from six.moves.urllib.request import urlopen
import planemo.linters.biocontainer_registered
import planemo.linters.conda_requirements
import planemo.linters.doi
import planemo.linters.urls
import planemo.linters.xsd
from planemo.io import error
from planemo.shed import find_urls_for_xml
from planemo.xml import validation
def build_lint_args(ctx, **kwds):
"""Handle common report, error, and skip linting arguments."""
report_level = kwds.get("report_level", "all")
fail_level = kwds.get("fail_level", "warn")
skip = kwds.get("skip", None)
if skip is None:
skip = ctx.global_config.get("lint_skip", "")
if isinstance(skip, list):
skip = ",".join(skip)
skip_types = [s.strip() for s in skip.split(",")]
lint_args = dict(
level=report_level,
fail_level=fail_level,
extra_modules=_lint_extra_modules(**kwds),
skip_types=skip_types,
)
return lint_args
# TODO: Move this back to tool_lint.
def _lint_extra_modules(**kwds):
linters = []
if kwds.get("xsd", True):
linters.append(planemo.linters.xsd)
if kwds.get("doi", False):
linters.append(planemo.linters.doi)
if kwds.get("urls", False):
linters.append(planemo.linters.urls)
if kwds.get("conda_requirements", False):
linters.append(planemo.linters.conda_requirements)
if kwds.get("biocontainer", False):
linters.append(planemo.linters.biocontainer_registered)
return linters
def setup_lint(ctx, **kwds):
"""Prepare lint_args and lint_ctx to begin linting a target."""
lint_args = build_lint_args(ctx, **kwds)
lint_ctx = LintContext(lint_args["level"])
return lint_args, lint_ctx
def handle_lint_complete(lint_ctx, lint_args, failed=False):
"""Complete linting of a target and decide exit code."""
if not failed:
failed = lint_ctx.failed(lint_args["fail_level"])
if failed:
error("Failed linting")
return 1 if failed else 0
def lint_dois(tool_xml, lint_ctx):
"""Find referenced DOIs and check they have valid with https://doi.org."""
dois = find_dois_for_xml(tool_xml)
for publication in dois:
is_doi(publication, lint_ctx)
def find_dois_for_xml(tool_xml):
dois = []
for element in tool_xml.getroot().findall("citations"):
for citation in list(element):
if citation.tag == 'citation' and citation.attrib.get('type', '') == 'doi':
dois.append(citation.text)
return dois
def is_doi(publication_id, lint_ctx):
"""Check if dx.doi knows about the ``publication_id``."""
base_url = "https://doi.org"
if publication_id is None:
lint_ctx.error('Empty DOI citation')
return
publication_id = publication_id.strip()
doiless_publication_id = publication_id.split("doi:", 1)[-1]
if not doiless_publication_id:
lint_ctx.error('Empty DOI citation')
return
url = "%s/%s" % (base_url, doiless_publication_id)
r = requests.get(url)
if r.status_code == 200:
if publication_id != doiless_publication_id:
lint_ctx.error("%s is valid, but Galaxy expects DOI without 'doi:' prefix" % publication_id)
else:
lint_ctx.info("%s is a valid DOI" % publication_id)
elif r.status_code == 404:
lint_ctx.error("%s is not a valid DOI" % publication_id)
else:
lint_ctx.warn("dx.doi returned unexpected status code %d" % r.status_code)
def lint_xsd(lint_ctx, schema_path, path):
"""Lint XML at specified path with supplied schema."""
name = lint_ctx.object_name or os.path.basename(path)
validator = validation.get_validator(require=True)
validation_result = validator.validate(schema_path, path)
if not validation_result.passed:
msg = "Invalid XML found in file: %s. Errors [%s]"
msg = msg % (name, validation_result.output)
lint_ctx.error(msg)
else:
lint_ctx.info("File validates against XML schema.")
def lint_urls(root, lint_ctx):
"""Find referenced URLs and verify they are valid."""
urls, docs = find_urls_for_xml(root)
    # This is from Google Chrome on macOS, current at time of writing:
BROWSER_USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36"
def validate_url(url, lint_ctx, user_agent=None):
is_valid = True
if url.startswith('http://') or url.startswith('https://'):
if user_agent:
headers = {"User-Agent": user_agent, 'Accept': '*/*'}
else:
headers = None
r = None
try:
r = requests.get(url, headers=headers, stream=True)
r.raise_for_status()
next(r.iter_content(1000))
except Exception as e:
if r and r.status_code == 429:
# too many requests
pass
else:
is_valid = False
lint_ctx.error("Error '%s' accessing %s" % (e, url))
else:
try:
with urlopen(url) as handle:
handle.read(100)
except Exception as e:
is_valid = False
lint_ctx.error("Error '%s' accessing %s" % (e, url))
if is_valid:
lint_ctx.info("URL OK %s" % url)
for url in urls:
validate_url(url, lint_ctx)
for url in docs:
validate_url(url, lint_ctx, BROWSER_USER_AGENT)
__all__ = (
"build_lint_args",
"handle_lint_complete",
"lint_dois",
"lint_urls",
"lint_xsd",
)
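# Editor's note: a minimal, hypothetical sketch of how these helpers are combined
# by a lint command (not part of this module); ``ctx`` is assumed to be a planemo
# CLI context, ``tool_xml_root`` a parsed tool XML root, and ``LintContext.lint``
# to accept a (name, lint_function, target) triple.
#   lint_args, lint_ctx = setup_lint(ctx, report_level="all", fail_level="warn")
#   lint_ctx.lint("lint_urls", lint_urls, tool_xml_root)
#   sys.exit(handle_lint_complete(lint_ctx, lint_args))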
| 32.648045
| 147
| 0.636721
|
4a06002baa15e09aa4e8731bb03e95e0b8385d23
| 893
|
py
|
Python
|
tests/unit/test_models/test_submodels/test_external_circuit/test_function_control.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/test_models/test_submodels/test_external_circuit/test_function_control.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/test_models/test_submodels/test_external_circuit/test_function_control.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Test function control submodel
#
import pybamm
import tests
import unittest
def external_circuit_function(variables):
I = variables["Current [A]"]
V = variables["Terminal voltage [V]"]
return V + I - pybamm.FunctionParameter("Current plus voltage function", pybamm.t)
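# Editor's note: the residual returned above is treated by the FunctionControl
# submodel (instantiated in the test below) as a constraint set to zero, i.e. it
# enforces V + I = f(t). A hypothetical constant-power variant, written in the
# same style and not used by the test, could look like this:
def external_circuit_constant_power(variables):
    I = variables["Current [A]"]
    V = variables["Terminal voltage [V]"]
    # "Power function" is an assumed parameter name for illustration only
    return I * V - pybamm.FunctionParameter("Power function", pybamm.t)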
class TestFunctionControl(unittest.TestCase):
def test_public_functions(self):
param = pybamm.standard_parameters_lithium_ion
submodel = pybamm.external_circuit.FunctionControl(
param, external_circuit_function
)
variables = {"Terminal voltage [V]": pybamm.Scalar(0)}
std_tests = tests.StandardSubModelTests(submodel, variables)
std_tests.test_all()
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
| 26.264706
| 86
| 0.68981
|
4a06002e3484652559a2192c44b953a2233ab840
| 1,463
|
py
|
Python
|
setup.py
|
iPlantCollaborativeOpenSource/rfive
|
6a50bfe9c646f23b4dfde7e7bbda2381f33552af
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
iPlantCollaborativeOpenSource/rfive
|
6a50bfe9c646f23b4dfde7e7bbda2381f33552af
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
iPlantCollaborativeOpenSource/rfive
|
6a50bfe9c646f23b4dfde7e7bbda2381f33552af
|
[
"BSD-3-Clause"
] | 2
|
2019-12-04T22:35:59.000Z
|
2019-12-11T22:37:02.000Z
|
import os
import setuptools
from rfive.version import get_version
readme = open('README.md').read()
long_description = """
rfive %s
A unified interface into multiple cloud providers.
To install use pip install git+git://git@github.com:iPlantCollaborativeOpenSource/rfive.git
----
%s
----
For more information, please see: https://github.com/iPlantCollaborativeOpenSource/rfive
""" % (get_version('short'), readme)
with open('requirements.txt') as r:
required = r.readlines()
setuptools.setup(
name='rfive',
version=get_version('short'),
author='iPlant Collaborative',
author_email='atmodevs@gmail.com',
description="A unified interface into multiple cloud providers.",
long_description=long_description,
license="Apache License, Version 2.0",
url="https://github.com/iPlantCollaborativeOpenSource/rfive",
packages=setuptools.find_packages(),
install_requires=required,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries",
"Topic :: System",
"Topic :: System :: Clustering",
"Topic :: System :: Distributed Computing",
"Topic :: System :: Systems Administration"
])
| 29.857143
| 91
| 0.683527
|
4a0601bd18a7344cd3e88615f713f901a87bc362
| 4,213
|
py
|
Python
|
Examples/Example_6/run.py
|
mrslezak/Engine
|
c46ff278a2c5f4162db91a7ab500a0bb8cef7657
|
[
"BSD-3-Clause"
] | 335
|
2016-10-07T16:31:10.000Z
|
2022-03-02T07:12:03.000Z
|
Examples/Example_6/run.py
|
mrslezak/Engine
|
c46ff278a2c5f4162db91a7ab500a0bb8cef7657
|
[
"BSD-3-Clause"
] | 59
|
2016-10-31T04:20:24.000Z
|
2022-01-03T16:39:57.000Z
|
Examples/Example_6/run.py
|
mrslezak/Engine
|
c46ff278a2c5f4162db91a7ab500a0bb8cef7657
|
[
"BSD-3-Clause"
] | 180
|
2016-10-08T14:23:50.000Z
|
2022-03-28T10:43:05.000Z
|
#!/usr/bin/env python
import sys
import os
sys.path.append('../')
from ore_examples_helper import OreExample
oreex = OreExample(sys.argv[1] if len(sys.argv)>1 else False)
# Portfolio 1 run
oreex.print_headline("Run ORE to produce NPV cube and exposures for portfolio 1")
oreex.run("Input/ore_portfolio_1.xml")
oreex.get_times("Output/portfolio_1/log.txt")
oreex.print_headline("Plot results for portfolio 1")
oreex.setup_plot("portfolio_1")
oreex.plot(os.path.join("portfolio_1", "exposure_trade_swap_01.csv"), 2, 3, 'b', "EPE Swap")
oreex.plot(os.path.join("portfolio_1", "exposure_trade_collar_01.csv"), 2, 4, 'r', "ENE Collar")
oreex.plot(os.path.join("portfolio_1", "exposure_nettingset_CPTY_A.csv"), 2, 4, 'g', "ENE Netting")
#oreex.plot(os.path.join("portfolio_1", "exposure_nettingset_CPTY_A.csv"), 2, 3, 'g', "EPE Netting")
oreex.decorate_plot(title="Example 6, Portfolio 1")
oreex.save_plot_to_file(os.path.join("Output", "portfolio_1"))
# Portfolio 2 run
oreex.print_headline("Run ORE to produce NPV cube and exposures for portfolio 2")
oreex.run("Input/ore_portfolio_2.xml")
oreex.get_times("Output/portfolio_2/log.txt")
oreex.print_headline("Plot results for portfolio 2")
oreex.setup_plot("portfolio_2")
oreex.plot(os.path.join("portfolio_2", "exposure_trade_floor_01.csv"), 2, 3, 'b', "EPE Floor")
oreex.plot(os.path.join("portfolio_2", "exposure_trade_cap_01.csv"), 2, 4, 'r', "ENE Cap")
oreex.plot(os.path.join("portfolio_2", "exposure_nettingset_CPTY_B.csv"), 2, 3, 'g', "EPE Net Cap and Floor")
oreex.plot(os.path.join("portfolio_2", "exposure_trade_collar_02.csv"), 2, 4, 'g', "ENE Collar", offset=1, marker='o', linestyle='')
oreex.decorate_plot(title="Example 6, Portfolio 2")
oreex.save_plot_to_file(os.path.join("Output", "portfolio_2"))
# Portfolio 3 run
oreex.print_headline("Run ORE to produce NPV cube and exposures for portfolio 3")
oreex.run("Input/ore_portfolio_3.xml")
oreex.get_times("Output/portfolio_3/log.txt")
oreex.print_headline("Plot results for portfolio 3")
oreex.setup_plot("portfolio_3")
oreex.plot(os.path.join("portfolio_3", "exposure_trade_cap_02.csv"), 2, 3, 'b', "EPE Cap")
oreex.plot(os.path.join("portfolio_3", "exposure_trade_cap_03.csv"), 2, 4, 'r', "ENE Amortising Cap")
oreex.plot(os.path.join("portfolio_3", "exposure_nettingset_CPTY_B.csv"), 2, 3, 'g', "EPE Netted")
oreex.decorate_plot(title="Example 6, Portfolio 3")
oreex.save_plot_to_file(os.path.join("Output", "portfolio_3"))
# Portfolio 4 run
oreex.print_headline("Run ORE to produce NPV cube and exposures for portfolio 4")
oreex.run("Input/ore_portfolio_4.xml")
oreex.get_times("Output/portfolio_4/log.txt")
oreex.print_headline("Plot results for portfolio 4")
oreex.setup_plot("portfolio_4")
oreex.plot(os.path.join("portfolio_4", "exposure_nettingset_CPTY_A.csv"), 2, 3, 'b', "EPE Swap + Collar")
oreex.plot(os.path.join("portfolio_4", "exposure_nettingset_CPTY_A.csv"), 2, 4, 'r', "ENE Swap + Collar")
oreex.plot(os.path.join("portfolio_4", "exposure_nettingset_CPTY_B.csv"), 2, 3, 'b', "EPE CapFloored Swap", offset=1, marker='o', linestyle='')
oreex.plot(os.path.join("portfolio_4", "exposure_nettingset_CPTY_B.csv"), 2, 4, 'r', "ENE CapFloored Swap", offset=1, marker='o', linestyle='')
oreex.decorate_plot(title="Example 6, Portfolio 4")
oreex.save_plot_to_file(os.path.join("Output", "portfolio_4"))
# Portfolio 5 run
oreex.print_headline("Run ORE to produce NPV cube and exposures for portfolio 5")
oreex.run("Input/ore_portfolio_5.xml")
oreex.get_times("Output/portfolio_5/log.txt")
oreex.print_headline("Plot results for portfolio 5")
oreex.setup_plot("portfolio_5")
oreex.plot(os.path.join("portfolio_5", "exposure_nettingset_CPTY_A.csv"), 2, 3, 'b', "EPE Capped swap")
oreex.plot(os.path.join("portfolio_5", "exposure_nettingset_CPTY_A.csv"), 2, 4, 'r', "ENE Capped swap")
oreex.plot(os.path.join("portfolio_5", "exposure_nettingset_CPTY_B.csv"), 2, 3, 'b', "EPE Swap + Cap", offset=1, marker='o', linestyle='')
oreex.plot(os.path.join("portfolio_5", "exposure_nettingset_CPTY_B.csv"), 2, 4, 'r', "ENE Swap + Cap", offset=1, marker='o', linestyle='')
oreex.decorate_plot(title="Example 6, Portfolio 5")
oreex.save_plot_to_file(os.path.join("Output", "portfolio_5"))
| 50.759036
| 143
| 0.745075
|
4a06042c989cba7437d95ac442d69d1549bd29e1
| 12,632
|
py
|
Python
|
analysis/interaction_analysis_all_dbs.py
|
broncio123/mpmodeling
|
4910d6fc8822fd7358edeca1ed2e57383ec5bc35
|
[
"MIT"
] | null | null | null |
analysis/interaction_analysis_all_dbs.py
|
broncio123/mpmodeling
|
4910d6fc8822fd7358edeca1ed2e57383ec5bc35
|
[
"MIT"
] | null | null | null |
analysis/interaction_analysis_all_dbs.py
|
broncio123/mpmodeling
|
4910d6fc8822fd7358edeca1ed2e57383ec5bc35
|
[
"MIT"
] | null | null | null |
import os
import sys
import numpy
import pandas as pd
import json
import subprocess
import isambard_dev
import operator
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from operator import itemgetter
import seaborn as sns
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
##########################################################
modules_paths = [
"/home/ba13026/mpmodeling/analysis",
"/home/ba13026/mpmodeling/protocols"
]
for path in modules_paths:
if path not in sys.path:
sys.path.append(path)
##########################################################
from cluster_transfer import BG
import setup_geometry_interactions_db
from setup_geometry_interactions_db import \
Json,Tags,RigidBody,RadialProfiles,Miscellaneous,Interhelix_Interactions, Base
from insert2db_geometry_interactions import interaction_direction
import geometry_interactions
from geometry_interactions import Models, Analyse_Interactions, start_session, sort_superbase
##########################################################
inter_type = sys.argv[1] # Define either 'kihs' or 'hbonds'
start_frame_no = int(sys.argv[2])
WD = BG.workdir+'md_100ns_dbs/'
##########################################################
class MyDB:
def __init__(self):
self.db_path = ''
self.name = ''
self.tags = []
self.id_extractor = ''
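# Each MyDB instance bundles, for one modelling stage, the database path, a short
# stage name, the list of (mutant, conformation) tags stored as JSON strings, and
# an ``id_extractor(session, tag)`` callable that returns the database ids of the
# models to analyse for that tag (see the per-stage sections below).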
##########################################################
# DOCKED MODELS
docked = MyDB()
db_path = WD+'mutants_docked_geometry-interactions.db'
docked.db_path = db_path
docked.name = 'docked'
docked.tags = [
json.dumps(['cWza','conformation0']),
json.dumps(['cWza','conformation1']),
json.dumps(['cWza-K375C','conformation0']),
json.dumps(['cWza-K375C','conformation1']),
json.dumps(['cWza-S355C','conformation0']),
json.dumps(['cWza-S355C','conformation1']),
json.dumps(['cWza-Y373C','conformation0'])
]
# STUFF FOR DATABASE ID EXTRACTION
with open(WD+'filtered_ids_new.json','r') as fp:
Filtered_IDs = json.load(fp)
def extractor_docked(session, tags):
mutant, group = json.loads(tags)
return list(Filtered_IDs[mutant][group])
docked.id_extractor = extractor_docked
##########################################################
# EMMD MODELS
emmd = MyDB()
db_path = WD+'mutants_em-conformations_geometry-interactions.db'
emmd.db_path = db_path
emmd.name = 'emmd'
emmd.tags = [
json.dumps(['cWza','em-conformation0']),
json.dumps(['cWza','em-conformation1']),
json.dumps(['cWza-K375C','em-conformation0']),
json.dumps(['cWza-K375C','em-conformation1']),
json.dumps(['cWza-S355C','em-conformation0']),
json.dumps(['cWza-S355C','em-conformation1']),
json.dumps(['cWza-Y373C','em-conformation1'])
]
def extractor_emmd(mysession, tags):
mutant, group = json.loads(tags)
return [x[0] for x in mysession.query(Tags.id).filter_by(mutant=mutant,group=group).all()]
emmd.id_extractor = extractor_emmd
##########################################################
# PRMD MODELS
prmd = MyDB()
db_path = WD+'mutants_prmd-conformations_geometry-interactions.db'
prmd.db_path = db_path
prmd.name = 'prmd'
prmd.tags = [
json.dumps(['cWza','conformation0']),
json.dumps(['cWza','conformation1']),
json.dumps(['cWza-K375C','conformation0']),
json.dumps(['cWza-K375C','conformation1']),
json.dumps(['cWza-S355C','conformation0']),
json.dumps(['cWza-S355C','conformation1']),
json.dumps(['cWza-Y373C','conformation1'])
]
def get_ModelsIDs_prmd(session):
inter_tags = {
'hbonds': Interhelix_Interactions.hbonds,
'kihs': Interhelix_Interactions.kihs
}
with open(WD+'mutants_prmd_conformations_pdb_paths.json','r') as fp:
DBTags = json.load(fp)
FrameRange = range(40,51)
ModelsIDs_per_tag = {}
for tag in prmd.tags:
ModelsIDs_per_tag[tag] = []
for info in DBTags:
mutant, group, pdb_name = info[0]
conformation, stage_name, frame_no = group.split(':')
db_tag = json.dumps([mutant, conformation])
if (db_tag == tag) and (int(frame_no) in FrameRange):
try:
db_id = session.query(Tags.id).filter_by(mutant=mutant,group=group,pdb_name=pdb_name).first()[0]
ModelsIDs_per_tag[tag].append(db_id)
except:
print("No ID for model: ", mutant, group, pdb_name)
# Find IDs with no interaction data
IDs_to_remove = []
for i in range(len(prmd.tags)):
for id in list(ModelsIDs_per_tag[prmd.tags[i]]):
for inter_type in inter_tags.keys():
try:
data = session.query(inter_tags[inter_type]).filter_by(id=id).first()[0]
except:
IDs_to_remove.append([prmd.tags[i], id])
# Remove IDs with no interaction data
for x in IDs_to_remove:
tag, id = x
try:
ModelsIDs_per_tag[tag].remove(id)
except:
pass
return ModelsIDs_per_tag
def extractor_prmd(session, db_tag):
ModelsIDs_per_tag = get_ModelsIDs_prmd(session)
return ModelsIDs_per_tag[db_tag]
prmd.id_extractor = extractor_prmd
##########################################################
# URMD MODELS
urmd = MyDB()
n = start_frame_no
db_path = WD+'mutants_urmd_'+str(n)+'-'+str(n+10)+'ns-conformations_geometry-interactions.db'
urmd.db_path = db_path
urmd.name = 'urmd_'+str(n)+'-'+str(n+10)+'ns'
urmd.tags = [
json.dumps(['cWza','conformation0']),
json.dumps(['cWza','conformation1']),
json.dumps(['cWza-K375C','conformation0']),
json.dumps(['cWza-K375C','conformation1']),
json.dumps(['cWza-S355C','conformation0']),
json.dumps(['cWza-S355C','conformation1']),
json.dumps(['cWza-Y373C','conformation1'])
]
##########################################################
# STUFF FOR DATABASE ID EXTRACTION
##########################################################
def get_ModelsIDs_urmd(session):
inter_tags = {
'hbonds': Interhelix_Interactions.hbonds,
'kihs': Interhelix_Interactions.kihs
}
n = start_frame_no
with open(WD+'mutants_urmd_'+str(n)+'-'+str(n+10)+'ns-conformations_pdb_paths.json','r') as fp:
DBTags = json.load(fp)
FrameRange = range(start_frame_no,start_frame_no+11)
ModelsIDs_per_tag = {}
for tag in urmd.tags:
ModelsIDs_per_tag[tag] = []
for info in DBTags:
mutant, group, pdb_name = info[0]
conformation, stage_name, frame_no = group.split(':')
db_tag = json.dumps([mutant, conformation])
if (db_tag == tag) and (int(frame_no) in FrameRange):
try:
db_id = session.query(Tags.id).filter_by(mutant=mutant,group=group,pdb_name=pdb_name).first()[0]
ModelsIDs_per_tag[tag].append(db_id)
except:
print("No ID available for model: ", mutant, group, pdb_name)
# Find IDs with no interaction data
IDs_to_remove = []
for i in range(len(urmd.tags)):
for id in list(ModelsIDs_per_tag[urmd.tags[i]]):
for inter_type in inter_tags.keys():
try:
data = session.query(inter_tags[inter_type]).filter_by(id=id).first()[0]
except:
IDs_to_remove.append([urmd.tags[i], id])
# Remove IDs with no interaction data
for x in IDs_to_remove:
tag, id = x
try:
ModelsIDs_per_tag[tag].remove(id)
except:
pass
return ModelsIDs_per_tag
def extractor_urmd(session, db_tag):
ModelsIDs_per_tag = get_ModelsIDs_urmd(session)
return ModelsIDs_per_tag[db_tag]
urmd.id_extractor = extractor_urmd
##########################################################
def visualise_data0(df, Labels, stage, dc, inter_type):
Inter_Labels = {
'hbonds': 'HO-atoms',
'kihs': 'KIH-atoms'
}
fig,ax = plt.subplots(2,2,figsize=(14,16))
mutant_names = ['cWza','cWza-K375C','cWza-S355C','cWza-Y373C']
axes = {
'cWza':ax[0,0],
'cWza-K375C':ax[0,1],
'cWza-S355C':ax[1,0],
'cWza-Y373C':ax[1,1]
}
DFColumns = {
'cWza':[0+dc,1+dc],
'cWza-K375C':[2+dc,3+dc],
'cWza-S355C':[4+dc,5+dc],
'cWza-Y373C':[6+dc]
}
Colors = {
'cWza':['blue','green'],
'cWza-K375C':['blue','green'],
'cWza-S355C':['blue','green'],
'cWza-Y373C':['green']
}
Conformation = {
'cWza':['Narrow', 'Wide'],
'cWza-K375C':['Narrow', 'Wide'],
'cWza-S355C':['Narrow', 'Wide'],
'cWza-Y373C':['Single'],
}
for mutant in mutant_names:
df.plot(kind='barh', y=DFColumns[mutant],color=Colors[mutant],ax=axes[mutant])
axes[mutant].legend(Conformation[mutant],loc='best',fontsize=15)
axes[mutant].set_xlim(0,1)
axes[mutant].tick_params(axis='both',direction='in')
axes[mutant].set_title(mutant+" : "+stage.name, fontsize=28)
ax[1,0].set_xlabel("probability", fontsize=22)
ax[1,1].set_xlabel("probability", fontsize=22)
ax[0,0].set_ylabel("")
ax[1,0].set_ylabel("")
ax[0,1].set_ylabel(Inter_Labels[inter_type], fontsize=25)
ax[1,1].set_ylabel(Inter_Labels[inter_type], fontsize=25)
axes['cWza'].set_yticklabels(itemgetter(*list(df.index))(Labels), fontsize=15)
axes['cWza-S355C'].set_yticklabels(itemgetter(*list(df.index))(Labels), fontsize=15)
axes['cWza-K375C'].set_yticklabels("")
axes['cWza-Y373C'].set_yticklabels("")
fig.tight_layout()
plt.show()
filename = WD+inter_type+'_'+stage.name+'_'+urmd.name+'.png'
plt.savefig(filename)
##########################################################
# PERFORM CALCULATION
STAGES = [docked, emmd, prmd, urmd]
##########################################################
Superbases = {}
Interaction_data = {}
Analyses = {}
for stage in STAGES:
stage_session = start_session(stage.db_path)
models = Models(stage_session)
MyTags = stage.tags
Interaction_data[stage.name] = {}
Superbases[stage.name] = {}
Analyses[stage.name] = {}
for tags in MyTags:
mutant, group = json.loads(tags)
models.ids = stage.id_extractor(models.session, tags)
analysis = Analyse_Interactions(models)
Analyses[stage.name][tags] = analysis
Interaction_data[stage.name][tags] = analysis.get_interaction_data(inter_type)
sbase = analysis.get_superbase(inter_type)
Superbases[stage.name][tags] = sbase
def NestedDictValues(d):
for v in d.values():
if isinstance(v, dict):
yield from NestedDictValues(v)
else:
yield v
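# e.g. list(NestedDictValues({'a': {'b': 1}, 'c': 2})) == [1, 2]; used below to
# flatten the nested per-stage/per-tag superbases before taking their union.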
unified_sbase = set()
for sb in list(NestedDictValues(Superbases)):
unified_sbase = unified_sbase.union(set(sb))
sbase = list(unified_sbase)
sbase0 = sort_superbase(list(sbase), N_residues=32)
with open(WD+"superbase_"+inter_type+"_docked2urmd.json",'w') as fp:
    json.dump(sbase0, fp, indent=4)
###########################################################
# Probabilities are determined relative to the superbase
Probs = {}
for stage in STAGES:
MyTags = stage.tags
Probs[stage.name] = {}
for tags in MyTags:
analysis = Analyses[stage.name][tags]
atoms = Interaction_data[stage.name][tags]
stats = analysis.get_interaction_stats(sbase0, atoms)
prob = analysis.get_interaction_probability(sbase0, stats)
Probs[stage.name][tags] = prob
tools = geometry_interactions.Tools()
Labels = tools.labels_df(sbase0,inter_type)
###########################################################
# FRAME AND FILTER PROBABILITY DATA
SortedData = []
for stage in STAGES:
MyTags = stage.tags
for tag in MyTags:
SortedData.append(Probs[stage.name][tag])
SortedData = numpy.array(SortedData).T
import pandas as pd
df = pd.DataFrame( SortedData )
tolerance = 0.05
df = df[df > tolerance]
df = df[df.notnull().any(axis=1)]
# Save DataFrame
df.to_json(WD+'df_'+inter_type+'_docked_to_'+urmd.name+'.json')
##########################################################
# PLOT ALL DATA AND SAVE FIGURES
dc = 0
for stage in STAGES:
visualise_data0(df, Labels, stage, dc, inter_type)
dc = dc + 7
| 34.326087
| 116
| 0.584706
|
4a06049b9c90d256cd4b76da3077eb6a2675fdcc
| 2,401
|
py
|
Python
|
ramp-frontend/ramp_frontend/tests/test_utils.py
|
frcaud/ramp-board
|
3df90e51a4faeb0c03bab5dc13e12311807a618e
|
[
"BSD-3-Clause"
] | 13
|
2019-02-16T22:30:11.000Z
|
2021-01-11T10:13:47.000Z
|
ramp-frontend/ramp_frontend/tests/test_utils.py
|
frcaud/ramp-board
|
3df90e51a4faeb0c03bab5dc13e12311807a618e
|
[
"BSD-3-Clause"
] | 427
|
2018-11-22T22:01:47.000Z
|
2022-03-15T17:35:57.000Z
|
ramp-frontend/ramp_frontend/tests/test_utils.py
|
frcaud/ramp-board
|
3df90e51a4faeb0c03bab5dc13e12311807a618e
|
[
"BSD-3-Clause"
] | 18
|
2018-11-22T16:22:18.000Z
|
2021-12-07T14:42:41.000Z
|
import shutil
import pytest
from ramp_utils import generate_flask_config
from ramp_utils import read_config
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from ramp_database.model import Model
from ramp_database.testing import create_toy_db
from ramp_database.utils import setup_db
from ramp_database.utils import session_scope
from ramp_database.tools.user import get_user_by_name
from ramp_frontend import create_app
from ramp_frontend import mail
from ramp_frontend.utils import body_formatter_user
from ramp_frontend.utils import send_mail
from ramp_frontend.testing import _fail_no_smtp_server
@pytest.fixture(scope="module")
def client_session(database_connection):
database_config = read_config(database_config_template())
ramp_config = ramp_config_template()
try:
deployment_dir = create_toy_db(database_config, ramp_config)
flask_config = generate_flask_config(database_config)
app = create_app(flask_config)
app.config["TESTING"] = True
app.config["WTF_CSRF_ENABLED"] = False
with session_scope(database_config["sqlalchemy"]) as session:
yield app.test_client(), session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
try:
# In case of failure we should close the global flask engine
from ramp_frontend import db as db_flask
db_flask.session.close()
except RuntimeError:
pass
db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
@_fail_no_smtp_server
def test_send_mail(client_session):
client, _ = client_session
with client.application.app_context():
with mail.record_messages() as outbox:
send_mail("xx@gmail.com", "subject", "body")
assert len(outbox) == 1
assert outbox[0].subject == "subject"
assert outbox[0].body == "body"
assert outbox[0].recipients == ["xx@gmail.com"]
def test_body_formatter_user(client_session):
_, session = client_session
user = get_user_by_name(session, "test_user")
for word in [
"test_user",
"User",
"Test",
"linkedin",
"twitter",
"facebook",
"github",
"notes",
"bio",
]:
assert word in body_formatter_user(user)
| 31.592105
| 72
| 0.700541
|
4a0604b3e522eb57e403b23823c3ab7432552069
| 4,861
|
py
|
Python
|
async_limits/storage/memory.py
|
anomit/limits
|
a02d3234664d2b4da9968fd5ad25899ce106517a
|
[
"MIT"
] | 1
|
2021-06-21T13:51:56.000Z
|
2021-06-21T13:51:56.000Z
|
async_limits/storage/memory.py
|
anomit/limits
|
a02d3234664d2b4da9968fd5ad25899ce106517a
|
[
"MIT"
] | null | null | null |
async_limits/storage/memory.py
|
anomit/limits
|
a02d3234664d2b4da9968fd5ad25899ce106517a
|
[
"MIT"
] | null | null | null |
import threading
import time
from collections import Counter
from .base import Storage
class LockableEntry(threading._RLock):
__slots__ = ["atime", "expiry"]
def __init__(self, expiry):
self.atime = time.time()
self.expiry = self.atime + expiry
super(LockableEntry, self).__init__()
class MemoryStorage(Storage):
"""
rate limit storage using :class:`collections.Counter`
as an in memory storage for fixed and elastic window strategies,
and a simple list to implement moving window strategy.
"""
STORAGE_SCHEME = ["memory"]
def __init__(self, uri=None, **_):
self.storage = Counter()
self.expirations = {}
self.events = {}
self.timer = threading.Timer(0.01, self.__expire_events)
self.timer.start()
super(MemoryStorage, self).__init__(uri)
def __expire_events(self):
for key in self.events.keys():
for event in list(self.events[key]):
with event:
if (
event.expiry <= time.time()
and event in self.events[key]
):
self.events[key].remove(event)
for key in list(self.expirations.keys()):
if self.expirations[key] <= time.time():
self.storage.pop(key, None)
self.expirations.pop(key, None)
def __schedule_expiry(self):
if not self.timer.is_alive():
self.timer = threading.Timer(0.01, self.__expire_events)
self.timer.start()
def incr(self, key, expiry, elastic_expiry=False):
"""
increments the counter for a given rate limit key
:param str key: the key to increment
:param int expiry: amount in seconds for the key to expire in
:param bool elastic_expiry: whether to keep extending the rate limit
window every hit.
"""
self.get(key)
self.__schedule_expiry()
self.storage[key] += 1
if elastic_expiry or self.storage[key] == 1:
self.expirations[key] = time.time() + expiry
return self.storage.get(key, 0)
def get(self, key):
"""
:param str key: the key to get the counter value for
"""
if self.expirations.get(key, 0) <= time.time():
self.storage.pop(key, None)
self.expirations.pop(key, None)
return self.storage.get(key, 0)
def clear(self, key):
"""
:param str key: the key to clear rate limits for
"""
self.storage.pop(key, None)
self.expirations.pop(key, None)
self.events.pop(key, None)
def acquire_entry(self, key, limit, expiry, no_add=False):
"""
:param str key: rate limit key to acquire an entry in
:param int limit: amount of entries allowed
:param int expiry: expiry of the entry
:param bool no_add: if True an entry is not actually acquired
but instead serves as a 'check'
:rtype: bool
"""
self.events.setdefault(key, [])
self.__schedule_expiry()
timestamp = time.time()
try:
entry = self.events[key][limit - 1]
except IndexError:
entry = None
if entry and entry.atime >= timestamp - expiry:
return False
else:
if not no_add:
self.events[key].insert(0, LockableEntry(expiry))
return True
def get_expiry(self, key):
"""
:param str key: the key to get the expiry for
"""
return int(self.expirations.get(key, -1))
def get_num_acquired(self, key, expiry):
"""
returns the number of entries already acquired
:param str key: rate limit key to acquire an entry in
:param int expiry: expiry of the entry
"""
timestamp = time.time()
return len([
k for k in self.events[key] if k.atime >= timestamp - expiry
]) if self.events.get(key) else 0
def get_moving_window(self, key, limit, expiry):
"""
returns the starting point and the number of entries in the moving
window
:param str key: rate limit key
:param int expiry: expiry of entry
:return: (start of window, number of acquired entries)
"""
timestamp = time.time()
acquired = self.get_num_acquired(key, expiry)
for item in self.events.get(key, []):
if item.atime >= timestamp - expiry:
return int(item.atime), acquired
return int(timestamp), acquired
def check(self):
"""
check if storage is healthy
"""
return True
def reset(self):
self.storage.clear()
self.expirations.clear()
self.events.clear()
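# --- Illustrative usage sketch; not part of the original module ---
# A minimal example of exercising MemoryStorage for a fixed-window limit and a
# moving-window check. The key names and numbers are made-up values chosen only
# for the demo, not anything defined by this library.
if __name__ == "__main__":
    storage = MemoryStorage()
    key = "fixed-window/user-42"      # hypothetical key
    allowed, window = 5, 10           # allow 5 hits per 10-second window
    for hit in range(7):
        count = storage.incr(key, expiry=window)
        print(hit, "allowed" if count <= allowed else "rejected", count)
    # moving-window style check: at most 2 entries per 10 seconds
    print(storage.acquire_entry("moving-window/user-42", limit=2, expiry=10))  # True
    print(storage.acquire_entry("moving-window/user-42", limit=2, expiry=10))  # True
    print(storage.acquire_entry("moving-window/user-42", limit=2, expiry=10))  # False, window is full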
| 31.771242
| 76
| 0.575396
|
4a0605b04f9c93869d9a94979c835c3987fbaccb
| 1,929
|
py
|
Python
|
django/contrib/gis/db/backends/oracle/adapter.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/contrib/gis/db/backends/oracle/adapter.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/contrib/gis/db/backends/oracle/adapter.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
from cx_Oracle import CLOB
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.geos import GeometryCollection, Polygon
class OracleSpatialAdapter(WKTAdapter):
input_size = CLOB
def __init__(self, geom):
"""
Oracle requires that polygon rings are in proper orientation. This
affects spatial operations and an invalid orientation may cause
failures. Correct orientations are:
* Outer ring - counter clockwise
* Inner ring(s) - clockwise
"""
if isinstance(geom, Polygon):
self._fix_polygon(geom)
elif isinstance(geom, GeometryCollection):
self._fix_geometry_collection(geom)
self.wkt = geom.wkt
self.srid = geom.srid
def _fix_polygon(self, poly):
"""Fix single polygon orientation as described in __init__()."""
if self._isClockwise(poly.exterior_ring):
poly.exterior_ring = list(reversed(poly.exterior_ring))
for i in range(1, len(poly)):
if not self._isClockwise(poly[i]):
poly[i] = list(reversed(poly[i]))
return poly
def _fix_geometry_collection(self, coll):
"""
Fix polygon orientations in geometry collections as described in
__init__().
"""
for i, geom in enumerate(coll):
if isinstance(geom, Polygon):
coll[i] = self._fix_polygon(geom)
def _isClockwise(self, coords):
"""
A modified shoelace algorithm to determine polygon orientation.
See https://en.wikipedia.org/wiki/Shoelace_formula.
"""
n = len(coords)
area = 0.0
for i in range(n):
j = (i + 1) % n
area += coords[i][0] * coords[j][1]
area -= coords[j][0] * coords[i][1]
return area < 0.0
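# --- Illustrative sketch of the shoelace orientation test used above; not Django API ---
# A standalone version of the same formula on plain coordinate tuples, so the
# orientation rule can be checked without a GEOS/Oracle setup. The function
# name and sample rings are made up for the demo.
def ring_is_clockwise(coords):
    # Signed area via the shoelace formula; a negative area means clockwise.
    n = len(coords)
    area = 0.0
    for i in range(n):
        j = (i + 1) % n
        area += coords[i][0] * coords[j][1]
        area -= coords[j][0] * coords[i][1]
    return area < 0.0

if __name__ == "__main__":
    # Counter-clockwise unit square (valid Oracle exterior ring) -> False
    print(ring_is_clockwise([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]))
    # Same ring reversed (clockwise, valid inner ring) -> True
    print(ring_is_clockwise([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)]))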
| 33.258621
| 75
| 0.587869
|
4a06071390e7618760ca6bac7689a3cad3e9ad9d
| 28,467
|
py
|
Python
|
qiskit/aqua/quantum_instance.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 504
|
2018-12-15T16:34:03.000Z
|
2022-03-26T11:24:53.000Z
|
qiskit/aqua/quantum_instance.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 746
|
2018-12-16T16:44:42.000Z
|
2021-07-10T16:59:43.000Z
|
qiskit/aqua/quantum_instance.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 421
|
2018-12-22T14:49:00.000Z
|
2022-03-04T09:47:07.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Quantum Instance module """
from typing import Optional, List, Union, Dict, Callable, Tuple
import copy
import logging
import time
import numpy as np
from qiskit.providers import Backend, BaseBackend
from qiskit.transpiler import CouplingMap, PassManager
from qiskit.transpiler.layout import Layout
from qiskit.assembler.run_config import RunConfig
from qiskit.circuit import QuantumCircuit
from qiskit.result import Result
from qiskit.qobj import Qobj
from qiskit import compiler
try:
from qiskit.providers.aer.noise import NoiseModel # pylint: disable=unused-import
except ImportError as ex:
pass
from .aqua_error import AquaError
from .utils.backend_utils import (is_ibmq_provider,
is_statevector_backend,
is_simulator_backend,
is_local_backend,
is_aer_qasm,
is_basicaer_provider,
support_backend_options)
from .utils.circuit_utils import summarize_circuits
logger = logging.getLogger(__name__)
class QuantumInstance:
"""Quantum Backend including execution setting."""
_BACKEND_CONFIG = ['basis_gates', 'coupling_map']
_COMPILE_CONFIG = ['initial_layout', 'seed_transpiler', 'optimization_level']
_RUN_CONFIG = ['shots', 'max_credits', 'memory', 'seed_simulator']
_QJOB_CONFIG = ['timeout', 'wait']
_NOISE_CONFIG = ['noise_model']
# https://github.com/Qiskit/qiskit-aer/blob/master/qiskit/providers/aer/backends/qasm_simulator.py
_BACKEND_OPTIONS_QASM_ONLY = ["statevector_sample_measure_opt", "max_parallel_shots"]
_BACKEND_OPTIONS = ["initial_statevector", "chop_threshold", "max_parallel_threads",
"max_parallel_experiments", "statevector_parallel_threshold",
"statevector_hpc_gate_opt"] + _BACKEND_OPTIONS_QASM_ONLY
def __init__(self,
backend: Union[Backend, BaseBackend],
# run config
shots: int = 1024,
seed_simulator: Optional[int] = None,
max_credits: int = 10,
# backend properties
basis_gates: Optional[List[str]] = None,
coupling_map: Optional[Union[CouplingMap, List[List]]] = None,
# transpile
initial_layout: Optional[Union[Layout, Dict, List]] = None,
pass_manager: Optional[PassManager] = None,
seed_transpiler: Optional[int] = None,
optimization_level: Optional[int] = None,
# simulation
backend_options: Optional[Dict] = None,
noise_model: Optional['NoiseModel'] = None,
# job
timeout: Optional[float] = None,
wait: float = 5.,
# others
skip_qobj_validation: bool = True,
measurement_error_mitigation_cls: Optional[Callable] = None,
cals_matrix_refresh_period: int = 30,
measurement_error_mitigation_shots: Optional[int] = None,
job_callback: Optional[Callable] = None) -> None:
"""
Quantum Instance holds a Qiskit Terra backend as well as configuration for circuit
transpilation and execution. When provided to an Aqua algorithm the algorithm will
execute the circuits it needs to run using the instance.
Args:
backend: Instance of selected backend
shots: Number of repetitions of each circuit, for sampling
seed_simulator: Random seed for simulators
max_credits: Maximum credits to use
basis_gates: List of basis gate names supported by the
target. Defaults to basis gates of the backend.
coupling_map: Coupling map (perhaps custom) to
target in mapping
initial_layout: Initial layout of qubits in mapping
pass_manager: Pass manager to handle how to compile the circuits
seed_transpiler: The random seed for circuit mapper
optimization_level: How much optimization to perform on the circuits.
Higher levels generate more optimized circuits, at the expense of longer
transpilation time.
backend_options: All running options for backend, please refer
to the provider of the backend for information as to what options it supports.
noise_model: noise model for simulator
timeout: Seconds to wait for job. If None, wait indefinitely.
wait: Seconds between queries for job result
skip_qobj_validation: Bypass Qobj validation to decrease circuit
processing time during submission to backend.
measurement_error_mitigation_cls: The approach to mitigate
measurement errors. Qiskit Ignis provides fitter classes for this functionality
and CompleteMeasFitter from qiskit.ignis.mitigation.measurement module can be used
here. (TensoredMeasFitter is not supported).
cals_matrix_refresh_period: How often to refresh the calibration
matrix in measurement error mitigation, in minutes.
measurement_error_mitigation_shots: The number of shots for
building the calibration matrix. If None, the main `shots` parameter value is used.
job_callback: Optional user supplied callback which can be used
to monitor job progress as jobs are submitted for processing by an Aqua algorithm.
The callback is provided the following arguments: `job_id, job_status,
queue_position, job`
Raises:
AquaError: the shots exceeds the maximum number of shots
AquaError: set noise model but the backend does not support that
AquaError: set backend_options but the backend does not support that
"""
from .deprecation import warn_class
warn_class('aqua.QuantumInstance',
'qiskit.utils.QuantumInstance',
'qiskit-terra')
self._backend = backend
self._pass_manager = pass_manager
# setup run config
if shots is not None:
if self.is_statevector and shots != 1:
logger.info("statevector backend only works with shot=1, changing "
"shots from %s to 1.", shots)
shots = 1
max_shots = self._backend.configuration().max_shots
if max_shots is not None and shots > max_shots:
raise AquaError('The maximum shots supported by the selected backend is {} '
'but you specified {}'.format(max_shots, shots))
run_config = RunConfig(shots=shots, max_credits=max_credits)
if seed_simulator is not None:
run_config.seed_simulator = seed_simulator
self._run_config = run_config
# setup backend config
basis_gates = basis_gates or backend.configuration().basis_gates
coupling_map = coupling_map or getattr(backend.configuration(), 'coupling_map', None)
self._backend_config = {
'basis_gates': basis_gates,
'coupling_map': coupling_map
}
# setup compile config
self._compile_config = {
'initial_layout': initial_layout,
'seed_transpiler': seed_transpiler,
'optimization_level': optimization_level
}
# setup job config
self._qjob_config = {'timeout': timeout} if self.is_local \
else {'timeout': timeout, 'wait': wait}
# setup noise config
self._noise_config = {}
if noise_model is not None:
if is_simulator_backend(self._backend) and not is_basicaer_provider(self._backend):
self._noise_config = {'noise_model': noise_model}
else:
raise AquaError("The noise model is not supported on the selected backend {} ({}) "
"only certain backends, such as Aer qasm simulator "
"support noise.".format(self.backend_name,
self._backend.provider()))
# setup backend options for run
self._backend_options = {}
if backend_options is not None:
if support_backend_options(self._backend):
self._backend_options = {'backend_options': backend_options}
else:
raise AquaError("backend_options can not used with the backends in IBMQ provider.")
# setup measurement error mitigation
self._meas_error_mitigation_cls = None
if self.is_statevector:
if measurement_error_mitigation_cls is not None:
raise AquaError("Measurement error mitigation does not work "
"with the statevector simulation.")
else:
self._meas_error_mitigation_cls = measurement_error_mitigation_cls
self._meas_error_mitigation_fitters: Dict[str, Tuple[np.ndarray, float]] = {}
# TODO: support different fitting method in error mitigation?
self._meas_error_mitigation_method = 'least_squares'
self._cals_matrix_refresh_period = cals_matrix_refresh_period
self._meas_error_mitigation_shots = measurement_error_mitigation_shots
if self._meas_error_mitigation_cls is not None:
logger.info("The measurement error mitigation is enabled. "
"It will automatically submit an additional job to help "
"calibrate the result of other jobs. "
"The current approach will submit a job with 2^N circuits "
"to build the calibration matrix, "
"where N is the number of measured qubits. "
"Furthermore, Aqua will re-use the calibration matrix for %s minutes "
"and re-build it after that.", self._cals_matrix_refresh_period)
# setup others
if is_ibmq_provider(self._backend):
if skip_qobj_validation:
logger.info("skip_qobj_validation was set True but this setting is not "
"supported by IBMQ provider and has been ignored.")
skip_qobj_validation = False
self._skip_qobj_validation = skip_qobj_validation
self._circuit_summary = False
self._job_callback = job_callback
self._time_taken = 0.
logger.info(self)
def __str__(self) -> str:
"""Overload string.
Returns:
str: the info of the object.
"""
# pylint: disable=import-outside-toplevel
from qiskit import __version__ as terra_version
info = "\nQiskit Terra version: {}\n".format(terra_version)
info += "Backend: '{} ({})', with following setting:\n{}\n{}\n{}\n{}\n{}\n{}".format(
self.backend_name, self._backend.provider(), self._backend_config, self._compile_config,
self._run_config, self._qjob_config, self._backend_options, self._noise_config)
info += "\nMeasurement mitigation: {}".format(self._meas_error_mitigation_cls)
return info
def transpile(self,
circuits: Union[QuantumCircuit, List[QuantumCircuit]]) -> List[QuantumCircuit]:
"""
A wrapper to transpile circuits to allow algorithm access the transpiled circuits.
Args:
circuits: circuits to transpile
Returns:
The transpiled circuits, it is always a list even though the length is one.
"""
if self._pass_manager is not None:
transpiled_circuits = self._pass_manager.run(circuits)
else:
transpiled_circuits = compiler.transpile(circuits,
self._backend,
**self._backend_config,
**self._compile_config)
if not isinstance(transpiled_circuits, list):
transpiled_circuits = [transpiled_circuits]
if logger.isEnabledFor(logging.DEBUG) and self._circuit_summary:
logger.debug("==== Before transpiler ====")
logger.debug(summarize_circuits(circuits))
if transpiled_circuits is not None:
logger.debug("==== After transpiler ====")
logger.debug(summarize_circuits(transpiled_circuits))
return transpiled_circuits
def assemble(self,
circuits: Union[QuantumCircuit, List[QuantumCircuit]]) -> Qobj:
""" assemble circuits """
return compiler.assemble(circuits, **self._run_config.to_dict())
def execute(self,
circuits: Union[QuantumCircuit, List[QuantumCircuit]],
had_transpiled: bool = False) -> Result:
"""
A wrapper to interface with quantum backend.
Args:
circuits: circuits to execute
had_transpiled: whether or not circuits had been transpiled
Returns:
Result object
TODO: Maybe we can combine the circuits for the main ones and calibration circuits before
assembling to the qobj.
"""
# pylint: disable=import-outside-toplevel
from .utils.run_circuits import run_qobj
from .utils.measurement_error_mitigation import (get_measured_qubits_from_qobj,
build_measurement_error_mitigation_qobj)
# maybe compile
if not had_transpiled:
circuits = self.transpile(circuits)
# assemble
qobj = self.assemble(circuits)
if self._meas_error_mitigation_cls is not None:
qubit_index, qubit_mappings = get_measured_qubits_from_qobj(qobj)
qubit_index_str = '_'.join([str(x) for x in qubit_index]) + \
"_{}".format(self._meas_error_mitigation_shots or self._run_config.shots)
meas_error_mitigation_fitter, timestamp = \
self._meas_error_mitigation_fitters.get(qubit_index_str, (None, 0.))
if meas_error_mitigation_fitter is None:
# check the asked qubit_index are the subset of build matrices
for key, _ in self._meas_error_mitigation_fitters.items():
stored_qubit_index = [int(x) for x in key.split("_")[:-1]]
stored_shots = int(key.split("_")[-1])
if len(qubit_index) < len(stored_qubit_index):
tmp = list(set(qubit_index + stored_qubit_index))
if sorted(tmp) == sorted(stored_qubit_index) and \
self._run_config.shots == stored_shots:
# the qubit used in current job is the subset and shots are the same
meas_error_mitigation_fitter, timestamp = \
self._meas_error_mitigation_fitters.get(key, (None, 0.))
meas_error_mitigation_fitter = \
meas_error_mitigation_fitter.subset_fitter( # type: ignore
qubit_sublist=qubit_index)
logger.info("The qubits used in the current job is the subset of "
"previous jobs, "
"reusing the calibration matrix if it is not out-of-date.")
build_cals_matrix = self.maybe_refresh_cals_matrix(timestamp) or \
meas_error_mitigation_fitter is None
if build_cals_matrix:
logger.info("Updating qobj with the circuits for measurement error mitigation.")
use_different_shots = not (
self._meas_error_mitigation_shots is None
or self._meas_error_mitigation_shots == self._run_config.shots)
temp_run_config = copy.deepcopy(self._run_config)
if use_different_shots:
temp_run_config.shots = self._meas_error_mitigation_shots
cals_qobj, state_labels, circuit_labels = \
build_measurement_error_mitigation_qobj(qubit_index,
self._meas_error_mitigation_cls,
self._backend,
self._backend_config,
self._compile_config,
temp_run_config)
if use_different_shots or is_aer_qasm(self._backend):
cals_result = run_qobj(cals_qobj, self._backend, self._qjob_config,
self._backend_options,
self._noise_config,
self._skip_qobj_validation, self._job_callback)
self._time_taken += cals_result.time_taken
result = run_qobj(qobj, self._backend, self._qjob_config,
self._backend_options, self._noise_config,
self._skip_qobj_validation, self._job_callback)
self._time_taken += result.time_taken
else:
# insert the calibration circuit into main qobj if the shots are the same
qobj.experiments[0:0] = cals_qobj.experiments
result = run_qobj(qobj, self._backend, self._qjob_config,
self._backend_options, self._noise_config,
self._skip_qobj_validation, self._job_callback)
self._time_taken += result.time_taken
cals_result = result
logger.info("Building calibration matrix for measurement error mitigation.")
meas_error_mitigation_fitter = \
self._meas_error_mitigation_cls(cals_result,
state_labels,
qubit_list=qubit_index,
circlabel=circuit_labels)
self._meas_error_mitigation_fitters[qubit_index_str] = \
(meas_error_mitigation_fitter, time.time())
else:
result = run_qobj(qobj, self._backend, self._qjob_config,
self._backend_options, self._noise_config,
self._skip_qobj_validation, self._job_callback)
self._time_taken += result.time_taken
if meas_error_mitigation_fitter is not None:
logger.info("Performing measurement error mitigation.")
skip_num_circuits = len(result.results) - len(circuits)
# remove the calibration counts from result object to assure the length of
# ExperimentalResult is equal length to input circuits
result.results = result.results[skip_num_circuits:]
tmp_result = copy.deepcopy(result)
for qubit_index_str, c_idx in qubit_mappings.items():
curr_qubit_index = [int(x) for x in qubit_index_str.split("_")]
tmp_result.results = [result.results[i] for i in c_idx]
if curr_qubit_index == qubit_index:
tmp_fitter = meas_error_mitigation_fitter
else:
tmp_fitter = \
meas_error_mitigation_fitter.subset_fitter( # type: ignore
curr_qubit_index)
tmp_result = tmp_fitter.filter.apply( # type: ignore
tmp_result, self._meas_error_mitigation_method
)
for i, n in enumerate(c_idx):
result.results[n] = tmp_result.results[i]
else:
result = run_qobj(qobj, self._backend, self._qjob_config,
self._backend_options, self._noise_config,
self._skip_qobj_validation, self._job_callback)
self._time_taken += result.time_taken
if self._circuit_summary:
self._circuit_summary = False
return result
def set_config(self, **kwargs):
"""Set configurations for the quantum instance."""
for k, v in kwargs.items():
if k in QuantumInstance._RUN_CONFIG:
setattr(self._run_config, k, v)
elif k in QuantumInstance._QJOB_CONFIG:
self._qjob_config[k] = v
elif k in QuantumInstance._COMPILE_CONFIG:
self._compile_config[k] = v
elif k in QuantumInstance._BACKEND_CONFIG:
self._backend_config[k] = v
elif k in QuantumInstance._BACKEND_OPTIONS:
if not support_backend_options(self._backend):
raise AquaError("backend_options can not be used with this backend "
"{} ({}).".format(self.backend_name, self._backend.provider()))
if k in QuantumInstance._BACKEND_OPTIONS_QASM_ONLY and self.is_statevector:
raise AquaError("'{}' is only applicable for qasm simulator but "
"statevector simulator is used as the backend.")
if 'backend_options' not in self._backend_options:
self._backend_options['backend_options'] = {}
self._backend_options['backend_options'][k] = v
elif k in QuantumInstance._NOISE_CONFIG:
if not is_simulator_backend(self._backend) or is_basicaer_provider(self._backend):
raise AquaError(
"The noise model is not supported on the selected backend {} ({}) "
"only certain backends, such as Aer qasm support "
"noise.".format(self.backend_name, self._backend.provider()))
self._noise_config[k] = v
else:
raise ValueError("unknown setting for the key ({}).".format(k))
@property
def time_taken(self) -> float:
"""Accumulated time taken for execution."""
return self._time_taken
def reset_execution_results(self) -> None:
""" Reset execution results """
self._time_taken = 0.
@property
def qjob_config(self):
"""Getter of qjob_config."""
return self._qjob_config
@property
def backend_config(self):
"""Getter of backend_config."""
return self._backend_config
@property
def compile_config(self):
"""Getter of compile_config."""
return self._compile_config
@property
def run_config(self):
"""Getter of run_config."""
return self._run_config
@property
def noise_config(self):
"""Getter of noise_config."""
return self._noise_config
@property
def backend_options(self):
"""Getter of backend_options."""
return self._backend_options
@property
def circuit_summary(self):
"""Getter of circuit summary."""
return self._circuit_summary
@circuit_summary.setter
def circuit_summary(self, new_value):
""" sets circuit summary """
self._circuit_summary = new_value
@property
def measurement_error_mitigation_cls(self): # pylint: disable=invalid-name
""" returns measurement error mitigation cls """
return self._meas_error_mitigation_cls
@measurement_error_mitigation_cls.setter
def measurement_error_mitigation_cls(self, new_value): # pylint: disable=invalid-name
""" sets measurement error mitigation cls """
self._meas_error_mitigation_cls = new_value
@property
def cals_matrix_refresh_period(self):
""" returns matrix refresh period """
return self._cals_matrix_refresh_period
@cals_matrix_refresh_period.setter
def cals_matrix_refresh_period(self, new_value):
""" sets matrix refresh period """
self._cals_matrix_refresh_period = new_value
@property
def measurement_error_mitigation_shots(self): # pylint: disable=invalid-name
""" returns measurement error mitigation shots """
return self._meas_error_mitigation_shots
@measurement_error_mitigation_shots.setter
def measurement_error_mitigation_shots(self, new_value): # pylint: disable=invalid-name
""" sets measurement error mitigation shots """
self._meas_error_mitigation_shots = new_value
@property
def backend(self):
"""Return BaseBackend backend object."""
return self._backend
@property
def backend_name(self):
"""Return backend name."""
return self._backend.name()
@property
def is_statevector(self):
"""Return True if backend is a statevector-type simulator."""
return is_statevector_backend(self._backend)
@property
def is_simulator(self):
"""Return True if backend is a simulator."""
return is_simulator_backend(self._backend)
@property
def is_local(self):
"""Return True if backend is a local backend."""
return is_local_backend(self._backend)
@property
def skip_qobj_validation(self):
""" checks if skip qobj validation """
return self._skip_qobj_validation
@skip_qobj_validation.setter
def skip_qobj_validation(self, new_value):
""" sets skip qobj validation flag """
self._skip_qobj_validation = new_value
def maybe_refresh_cals_matrix(self,
timestamp: Optional[float] = None) -> bool:
"""
Calculate the time difference from the query of last time.
Args:
timestamp: timestamp
Returns:
Whether or not refresh the cals_matrix
"""
timestamp = timestamp or 0.
ret = False
curr_timestamp = time.time()
difference = int(curr_timestamp - timestamp) / 60.0
if difference > self._cals_matrix_refresh_period:
ret = True
return ret
def cals_matrix(self,
qubit_index: Optional[List[int]] = None) -> \
Optional[Union[Tuple[np.ndarray, float], Dict[str, Tuple[np.ndarray, float]]]]:
"""
Get the stored calibration matrices and its timestamp.
Args:
qubit_index: the qubit index of corresponding calibration matrix.
If None, return all stored calibration matrices.
Returns:
The calibration matrix and the creation timestamp if qubit_index
is not None otherwise, return all matrices and their timestamp
in a dictionary.
"""
shots = self._meas_error_mitigation_shots or self._run_config.shots
if qubit_index:
qubit_index_str = '_'.join([str(x) for x in qubit_index]) + "_{}".format(shots)
# default mirrors the (fitter, timestamp) tuple shape used elsewhere so a missing key does not raise on unpacking
fitter, timestamp = self._meas_error_mitigation_fitters.get(qubit_index_str, (None, 0.))
if fitter is not None:
return fitter.cal_matrix, timestamp # type: ignore
else:
return {k: (v.cal_matrix, t) for k, (v, t) # type: ignore
in self._meas_error_mitigation_fitters.items()}
return None
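# --- Illustrative usage sketch; assumes qiskit-terra and qiskit-aer are installed ---
# Minimal example of wiring QuantumInstance to a local simulator and running a
# small circuit. The backend and circuit are ordinary Qiskit objects; the
# constructor arguments are the ones documented in __init__ above, and the
# chosen seeds/shots are arbitrary demo values.
if __name__ == "__main__":
    from qiskit import Aer, QuantumCircuit

    backend = Aer.get_backend("qasm_simulator")
    quantum_instance = QuantumInstance(backend, shots=1024,
                                       seed_simulator=42, seed_transpiler=42)

    bell = QuantumCircuit(2, 2)
    bell.h(0)
    bell.cx(0, 1)
    bell.measure([0, 1], [0, 1])

    result = quantum_instance.execute(bell)   # transpile + assemble + run
    print(result.get_counts())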
| 46.287805
| 102
| 0.597745
|
4a0607ae66550506dccafaa1d750e5ae53d907a2
| 12,827
|
py
|
Python
|
catamount/tests/full/tf_word2vec.py
|
jthestness/catamount
|
9de3090f1a02a04774f28a0d10f677a76f50446f
|
[
"Apache-2.0"
] | null | null | null |
catamount/tests/full/tf_word2vec.py
|
jthestness/catamount
|
9de3090f1a02a04774f28a0d10f677a76f50446f
|
[
"Apache-2.0"
] | 2
|
2021-05-18T20:31:42.000Z
|
2021-05-18T20:43:43.000Z
|
catamount/tests/full/tf_word2vec.py
|
jthestness/catamount
|
9de3090f1a02a04774f28a0d10f677a76f50446f
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import numpy as np
import pickle
import re
import sympy
import sys
sys.setrecursionlimit(50000)
from catamount.api import utils
import catamount.frameworks.tensorflow
from catamount.ops.constant import *
from catamount.ops.unknown_op import UnknownOp
from catamount.ops.variable import *
is_pytest_run = False
def test_tf_w2v_model():
global is_pytest_run
is_pytest_run = True
run_tf_w2v_model()
def run_tf_w2v_model():
global is_pytest_run
graph_meta = 'catamount/frameworks/example_graphs/tensorflow/full_models/language_models/word2vec_n200-latest_model.meta'
graph = catamount.frameworks.tensorflow.import_graph(graph_meta)
assert graph.isValid()
# ============ TO REMOVE INITIALIZATION OPS! =============
# NOTE: This code is pretty general and is likely to be migrated into
# Catamount code for removing TF-specific initialization ops
from catamount.ops import AssignOp
from catamount.ops import VariableOp
assign_ops = set()
for op in graph.opsByName.values():
if isinstance(op, AssignOp):
assign_ops.add(op)
for assign_op in assign_ops:
my_ancestors = set()
my_frontier = set()
my_frontier.add(assign_op)
while len(my_frontier) > 0:
next_op = my_frontier.pop()
for in_tensor in next_op.inputs:
if not isinstance(in_tensor.producer, VariableOp):
my_frontier.add(in_tensor.producer)
my_ancestors.add(next_op)
for next_op in my_ancestors:
graph.removeOp(next_op)
assert graph.isValid()
# Next, remove ops that are not executed during a standard training step:
graph_ops = list(graph._ops_by_name.values())
for op in graph_ops:
# Certain ops are only used for inference
if 'Model/NceLoss_1_3/' in op.name or \
'Model/Collapse_1/' in op.name or \
'Model/Embedding_1_3/' in op.name or \
'Model/Labels_1/' in op.name or \
'Model/SkipGramSampler_1/' in op.name or \
'Model/Mask_1/' in op.name:
graph.removeOp(op)
elif \
op.name == 'Model/Cast_1' or \
op.name == 'Model/Sum_1' or \
op.name == 'Model/Size_1' or \
op.name == 'Model/Exp_1' or \
op.name == 'Model/truediv_2' or \
op.name == 'Model/truediv_3':
graph.removeOp(op)
if not is_pytest_run:
print('Initial graph:\n{}\n'.format(graph))
init_params = graph.calcModelParameters()
print('Initial parameters: {}'.format(init_params))
print('Initial Flops: {}\n'.format(graph.calcAlgFlops()))
print('Placeholders:')
for op in graph.getPlaceholders():
print(op.debugString())
print('')
# Set up symbols to name dimensions
skip_window_symbol = utils.getPositiveIntSymbolFromString('skip_window')
num_skips_symbol = utils.getPositiveIntSymbolFromString('num_skips')
nce_samples_symbol = utils.getPositiveIntSymbolFromString('nce_samples')
hidden_dim_symbol = utils.getIntSymbolFromString('hidden_dim')
vocab_size_symbol = utils.getIntSymbolFromString('vocab_size')
subbatch_size_symbol = utils.getIntSymbolFromString('subbatch_size')
sequence_length_symbol = utils.getIntSymbolFromString('sequence_length')
batch_times_seq_symbol = sequence_length_symbol * subbatch_size_symbol
graph_iters_symbol = utils.getIntSymbolFromString('graph::iters')
# For simplicity, assign samples symbol in the op
nce_samp_op = graph.opsByName['Model/NceLoss_1_1/nce_loss/LogUniformCandidateSampler']
nce_samp_op._num_samples_symbol = nce_samples_symbol
# Convert these constant dimensions to symbols
base_skip_window = 8
base_num_skips = 8
base_nce_samples = 64
base_hidden_dim = 400
base_vocab_size = 40004
base_sequence_length = 32
base_subbatch_size = 1
# Find and set constants that contain model hyperparameters
const_dict = { 'Model/Gradient/Compute/gradients/Model/NceLoss_1_1/nce_loss/sub_1_grad/Shape_1': [nce_samples_symbol],
'Model/SkipGramSampler/Const': 2 * skip_window_symbol,
'Model/SkipGramSampler/strided_slice/stack': [0, skip_window_symbol],
'Model/SkipGramSampler/strided_slice/stack_1': [0, -skip_window_symbol],
'Model/Collapse/Reshape/shape': [-1, hidden_dim_symbol],
'Model/Gradient/Compute/gradients/Model/Embedding_1_1/Gather_grad/Shape': [vocab_size_symbol, hidden_dim_symbol],
'Model/Gradient/Compute/gradients/Model/NceLoss_1_1/nce_loss/embedding_lookup_1_grad/Shape': [vocab_size_symbol],
'Model/Gradient/Compute/gradients/Model/NceLoss_1_1/nce_loss/embedding_lookup_grad/Shape': [vocab_size_symbol, hidden_dim_symbol],
'Model/Mask/NotEqual/y': vocab_size_symbol - 3,
'Model/SkipGramSampler/Const_2': num_skips_symbol,
'Model/SkipGramSampler/Tile_1/multiples': [1, num_skips_symbol],
'Model/SkipGramSampler/Tile/multiples': [1, num_skips_symbol],
}
graph.bindConstantValues(const_dict)
# Next, bind the constant, placeholder, and variable shapes and propagate
bind_dict = { # Constants
# Placeholders
'Input/Input': [subbatch_size_symbol, sequence_length_symbol],
'Labels/Labels': [subbatch_size_symbol, sequence_length_symbol],
# Variables
'Model/NceLoss_1/b_Softmax': [vocab_size_symbol],
'Model/NceLoss_1/W_Softmax': [vocab_size_symbol, hidden_dim_symbol],
'Model/Embedding_1/EmbeddingWeights': [vocab_size_symbol, hidden_dim_symbol],
}
print('Binding variables')
# HACK: For now, manually set GatherNd op shapes. Later, implement GatherNd
gnd_op = graph.opsByName['Model/SkipGramSampler/GatherNd']
gnd_op.outputs[0].mergeShape([subbatch_size_symbol, num_skips_symbol * (sequence_length_symbol - 2 * skip_window_symbol)])
graph.bindShapesAndPropagate(bind_dict, warn_if_ill_defined=(not is_pytest_run), make_symbolic=True)
assert graph.isValid()
if not is_pytest_run:
print('\n\nCleaned Graph:\n{}'.format(graph))
print('\n\nBound values')
bind_subs = {
graph_iters_symbol: 1,
hidden_dim_symbol: base_hidden_dim,
sequence_length_symbol: base_sequence_length,
subbatch_size_symbol: base_subbatch_size,
vocab_size_symbol: base_vocab_size,
skip_window_symbol: base_skip_window,
num_skips_symbol: base_num_skips,
nce_samples_symbol: base_nce_samples,
}
# Verify parameter counts first
parameters = graph.calcModelParameters()
correct_params = 32043205
correct_flops = 21148823
correct_bytes = 23762537
correct_total_footprint = 137949925
print('Symbol associations: {}\n'.format(bind_subs))
# Calculate model parameter count
resolved_params = parameters.subs(bind_subs)
try:
resolved_params = int(resolved_params)
except:
print('ERROR: resolved_params should be int, but is {} = {}'.format(
type(resolved_params), resolved_params))
assert resolved_params == correct_params, \
'Incorrect model params: {}'.format(resolved_params)
print('Parameters: {}\nWith specified dims: {}\n'.format(parameters, resolved_params))
# Calculate algorithmic Flops
alg_flops = graph.calcAlgFlops()
resolved_flops = alg_flops.subs(bind_subs)
try:
resolved_flops = int(resolved_flops)
except:
print('ERROR: resolved_flops should be int, but is {} = {}'.format(
type(resolved_flops), resolved_flops))
assert resolved_flops == correct_flops, \
'Incorrect algorithmic flops: {}'.format(resolved_flops)
print('Algorithmic Flops: {}\nWith specified dims: {}\n'.format(alg_flops, resolved_flops))
# Calculate algorithmic Bytes accessed
alg_bytes = graph.calcAlgBytes()
resolved_bytes = alg_bytes.subs(bind_subs)
try:
resolved_bytes = int(resolved_bytes)
except:
print('ERROR: resolved_bytes should be int, but is {} = {}'.format(
type(resolved_bytes), resolved_bytes))
assert resolved_bytes == correct_bytes, \
'Incorrect algorithmic bytes: {}'.format(resolved_bytes)
print('Alg bytes accessed: {}\nWith specified dims: {}\n'.format(alg_bytes, resolved_bytes))
# Calculate total memory footprint
alg_footprint = graph.calcAlgFootprint()
resolved_footprint = alg_footprint.subs(bind_subs)
try:
resolved_footprint = int(resolved_footprint)
except:
print('ERROR: resolved_footprint should be int, but is {} = {}'.format(
type(resolved_footprint), resolved_footprint))
assert resolved_footprint == correct_total_footprint, \
'Incorrect algorithmic footprint: {}'.format(resolved_footprint)
print('Alg mem footprint: {}\nWith specified dims: {}\n'.format(alg_footprint, resolved_footprint))
# Calculate minimal memory footprint
alg_min_footprint = graph.calcMinimalFootprint(symbol_subs=bind_subs)
print('Alg minimal footprint (With specified dims): {}\n'.format(alg_min_footprint))
# Calculate algorithmic IO per step
total_io_footprint = 0
for op in graph.getPlaceholders():
total_io_footprint += op.calcAlgFootprint()
if isinstance(total_io_footprint, int):
resolved_io_footprint = total_io_footprint
else:
resolved_io_footprint = total_io_footprint.subs(bind_subs)
print('Alg IO footprint: {}\nWith specified dims: {}\n'.format(total_io_footprint, resolved_io_footprint))
if not is_pytest_run:
print('VERBOSE ALGORITHMIC FLOPS:')
graph.calcAlgFlops(verbose=True)
print('')
print('VERBOSE ALGORITHMIC BYTES:')
graph.calcAlgBytes(verbose=True)
print('')
print('VERBOSE ALGORITHMIC FOOTPRINT:')
graph.calcAlgFootprint(verbose=True)
print('')
# HACKY WAY TO SAVE MODELS FOR NOW!
pickle.dump(graph, open('catamount/frameworks/example_graphs/tensorflow/full_models/language_models/graph_word2vec.p', 'wb'))
if is_pytest_run:
return
print('\n\n======= Algorithmic graph-level analytics: =======')
hidden_dims = [1, 2, 3, 4, 5, 6, 7, 9, 10, 12, 14, 18, 20, 25, 28, 35, 40, 50, 56, 69, 78, 86, 96, 108, 119, 123, 133, 148, 163, 182, 202, 221, 246, 273, 297, 329, 330, 364, 396, 436, 437, 520, 572, 617, 676, 740, 796, 869, 948, 1017, 1106, 1202, 1286, 1394, 1510, 1611, 1742, 1882, 2004, 2161, 2476, 3040, 3714, 4520, 5478, 6628, 8019, 9702, 11739, 14204, 17186, 20795, 25161, 30444, 36837, 38100]
bind_subs.pop(hidden_dim_symbol)
resolved_params = parameters.subs(bind_subs)
print('Symbol associations: {}\n'.format(bind_subs))
print('Algorithmic Flops by hidden dimension, params, and per-batch-sample:')
resolved_flops = alg_flops.subs(bind_subs)
for hid_dim in hidden_dims:
graph_params = resolved_params.subs({hidden_dim_symbol: hid_dim})
graph_flops = resolved_flops.subs({hidden_dim_symbol: hid_dim})
graph_flops_per_sample = float(graph_flops) / \
bind_subs[subbatch_size_symbol]
print('{}\t{}\t{}\t{}'.format(hid_dim, graph_params, graph_flops,
int(graph_flops_per_sample)))
print('\nAlgorithmic bytes accessed by hidden dimension, params:')
resolved_bytes = alg_bytes.subs(bind_subs)
for hid_dim in hidden_dims:
graph_params = resolved_params.subs({hidden_dim_symbol: hid_dim})
graph_bytes = resolved_bytes.subs({hidden_dim_symbol: hid_dim})
print('{}\t{}\t{}'.format(hid_dim, graph_params, graph_bytes))
print('\nAlgorithmic total memory footprint by hidden dimension, params:')
resolved_footprint = alg_footprint.subs(bind_subs)
for hid_dim in hidden_dims:
graph_params = resolved_params.subs({hidden_dim_symbol: hid_dim})
graph_footprint = resolved_footprint.subs({hidden_dim_symbol: hid_dim})
print('{}\t{}\t{}'.format(hid_dim, graph_params, graph_footprint))
print('\nAlgorithmic minimal memory footprint by hidden dimension, params:')
full_subs = dict(bind_subs)
for hid_dim in hidden_dims:
graph_params = resolved_params.subs({hidden_dim_symbol: hid_dim})
full_subs[hidden_dim_symbol] = hid_dim
graph_min_foot = graph.calcMinimalFootprint(symbol_subs=full_subs)
print('{}\t{}\t{}'.format(hid_dim, graph_params, graph_min_foot))
if __name__ == "__main__":
test_tf_w2v_model()
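# --- Illustrative sketch of the symbolic-substitution pattern used above; not Catamount API ---
# The test keeps dimensions such as hidden_dim symbolic and only resolves them
# at the end with .subs(); the helper below reproduces that pattern with plain
# sympy symbols and small made-up numbers.
def _symbolic_subs_demo():
    import sympy
    hidden_dim, vocab_size = sympy.symbols("hidden_dim vocab_size", positive=True)
    # e.g. an embedding matrix plus a softmax weight matrix and bias
    params = vocab_size * hidden_dim + vocab_size * hidden_dim + vocab_size
    print(params)                                             # symbolic form
    print(int(params.subs({hidden_dim: 4, vocab_size: 10})))  # 2*10*4 + 10 = 90

# _symbolic_subs_demo()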
| 43.043624
| 402
| 0.682701
|
4a06095a9535022895b7813a89fd0c87995d6625
| 5,697
|
py
|
Python
|
edu/class3/vit_answer.py
|
h1063135843/PaddleViT
|
6f150b82d801b082cc7af09af396bfe2f6bf9987
|
[
"Apache-2.0"
] | 1
|
2021-12-12T12:34:01.000Z
|
2021-12-12T12:34:01.000Z
|
edu/class3/vit_answer.py
|
h1063135843/PaddleViT
|
6f150b82d801b082cc7af09af396bfe2f6bf9987
|
[
"Apache-2.0"
] | null | null | null |
edu/class3/vit_answer.py
|
h1063135843/PaddleViT
|
6f150b82d801b082cc7af09af396bfe2f6bf9987
|
[
"Apache-2.0"
] | null | null | null |
# ViT Online Class
# Author: Dr. Zhu
# Project: PaddleViT (https://github.com/BR-IDL/PaddleViT)
# 2021.11
import copy
import paddle
import paddle.nn as nn
class Identity(nn.Layer):
def __init__(self):
super().__init__()
def forward(self, x):
return x
class Mlp(nn.Layer):
def __init__(self, embed_dim, mlp_ratio, dropout=0.):
super().__init__()
self.fc1 = nn.Linear(embed_dim, int(embed_dim * mlp_ratio))
self.fc2 = nn.Linear(int(embed_dim * mlp_ratio), embed_dim)
self.act = nn.GELU()
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class PatchEmbedding(nn.Layer):
def __init__(self, image_size=224, patch_size=16, in_channels=3, embed_dim=768, dropout=0.):
super().__init__()
n_patches = (image_size // patch_size) * (image_size // patch_size)
self.patch_embedding = nn.Conv2D(in_channels=in_channels,
out_channels=embed_dim,
kernel_size=patch_size,
stride=patch_size)
self.position_embeddings = paddle.create_parameter(
shape=[1, n_patches + 1, embed_dim],
dtype='float32',
default_initializer=nn.initializer.TruncatedNormal(std=.02))
self.cls_token = paddle.create_parameter(
shape=[1, 1, embed_dim],
dtype='float32',
default_initializer=nn.initializer.Constant(0))
self.dropout = nn.Dropout(dropout)
def forward(self, x):
# [n, c, h, w]
cls_tokens = self.cls_token.expand((x.shape[0], -1, -1))
x = self.patch_embedding(x) # [n, c', h', w']
x = x.flatten(2) # [n, c', h'*w']
x = x.transpose([0, 2, 1]) # [n, h'*w', c']
x = paddle.concat((cls_tokens, x), axis=1)
embeddings = x + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
class Attention(nn.Layer):
"""multi-head self attention"""
def __init__(self, embed_dim, num_heads, qkv_bias=True, dropout=0., attention_dropout=0.):
super().__init__()
self.num_heads = num_heads
self.head_dim = int(embed_dim / num_heads)
self.all_head_dim = self.head_dim * num_heads
self.scales = self.head_dim ** -0.5
self.qkv = nn.Linear(embed_dim,
self.all_head_dim * 3)
self.proj = nn.Linear(embed_dim, embed_dim)
self.dropout = nn.Dropout(dropout)
self.attention_dropout = nn.Dropout(attention_dropout)
self.softmax = nn.Softmax(axis=-1)
def transpose_multihead(self, x):
# x: [N, num_patches, all_head_dim] -> [N, n_heads, num_patches, head_dim]
new_shape = x.shape[:-1] + [self.num_heads, self.head_dim]
x = x.reshape(new_shape)
x = x.transpose([0, 2, 1, 3])
return x
def forward(self, x):
B, N, _ = x.shape
# x -> [N, num_patches, dim]
# x -> q, k, v
qkv = self.qkv(x).chunk(3, axis=-1) # list of tensors
q, k, v = map(self.transpose_multihead, qkv)
attn = paddle.matmul(q, k, transpose_y=True) # q * k'
attn = attn * self.scales
attn = self.softmax(attn)
attn = self.attention_dropout(attn)
out = paddle.matmul(attn, v)
out = out.transpose([0, 2, 1, 3])
out = out.reshape([B, N, -1])
out = self.proj(out)
out = self.dropout(out)
return out
class EncoderLayer(nn.Layer):
def __init__(self, embed_dim=768, num_heads=4, qkv_bias=True, mlp_ratio=4.0, dropout=0., attention_dropout=0.):
super().__init__()
self.attn_norm = nn.LayerNorm(embed_dim)
self.attn = Attention(embed_dim, num_heads)
self.mlp_norm = nn.LayerNorm(embed_dim)
self.mlp = Mlp(embed_dim, mlp_ratio)
def forward(self, x):
h = x
x = self.attn_norm(x)
x = self.attn(x)
x = x + h
h = x
x = self.mlp_norm(x)
x = self.mlp(x)
x = x + h
return x
class Encoder(nn.Layer):
def __init__(self, embed_dim, depth):
super().__init__()
layer_list = []
for i in range(depth):
encoder_layer = EncoderLayer()
layer_list.append(encoder_layer)
self.layers = nn.LayerList(layer_list)
self.norm = nn.LayerNorm(embed_dim)
def forward(self, x):
for layer in self.layers:
x = layer(x)
x = self.norm(x)
return x
class VisualTransformer(nn.Layer):
def __init__(self,
image_size=224,
patch_size=16,
in_channels=3,
num_classes=1000,
embed_dim=768,
depth=3,
num_heads=8,
mlp_ratio=4,
qkv_bias=True,
dropout=0.,
attention_dropout=0.,
droppath=0.):
super().__init__()
self.patch_embedding = PatchEmbedding(image_size, patch_size, in_channels, embed_dim)
self.encoder = Encoder(embed_dim, depth)
self.classifier = nn.Linear(embed_dim, num_classes)
def forward(self, x):
x = self.patch_embedding(x)
x = self.encoder(x)
x = self.classifier(x[:, 0])
return x
def main():
vit = VisualTransformer()
print(vit)
paddle.summary(vit, (4, 3, 224, 224)) # must be tuple
if __name__ == "__main__":
main()
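# --- Illustrative shape check; assumes paddlepaddle is installed, not part of the original exercise ---
# With image_size=224 and patch_size=16 the embedding above produces
# (224 // 16) ** 2 = 196 patches plus one class token, i.e. sequences of
# length 197, and Attention keeps that shape. The helper below just prints
# those shapes for a random batch of 2 images.
def _shape_demo():
    x = paddle.randn([2, 3, 224, 224])
    tokens = PatchEmbedding(image_size=224, patch_size=16, embed_dim=768)(x)
    print(tokens.shape)                                          # [2, 197, 768]
    print(Attention(embed_dim=768, num_heads=8)(tokens).shape)   # [2, 197, 768]

# _shape_demo()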
| 29.518135
| 115
| 0.560821
|
4a060a5fe19d83183ca65dc405df06de0aec0fc6
| 798
|
py
|
Python
|
timesketch/lib/analyzers/sigma_tagger_test.py
|
dmdicki/timesketch
|
f0ae4230a88edbe62aa0ad1ce74b7dda844df731
|
[
"Apache-2.0"
] | 1
|
2020-03-06T18:20:53.000Z
|
2020-03-06T18:20:53.000Z
|
timesketch/lib/analyzers/sigma_tagger_test.py
|
dmdicki/timesketch
|
f0ae4230a88edbe62aa0ad1ce74b7dda844df731
|
[
"Apache-2.0"
] | null | null | null |
timesketch/lib/analyzers/sigma_tagger_test.py
|
dmdicki/timesketch
|
f0ae4230a88edbe62aa0ad1ce74b7dda844df731
|
[
"Apache-2.0"
] | 1
|
2021-11-16T00:01:18.000Z
|
2021-11-16T00:01:18.000Z
|
"""Tests for SigmaPlugin."""
from __future__ import unicode_literals
import mock
from timesketch.lib.analyzers import sigma_tagger
from timesketch.lib.testlib import BaseTest
from timesketch.lib.testlib import MockDataStore
class TestSigmaPlugin(BaseTest):
"""Tests the functionality of the analyzer."""
def __init__(self, *args, **kwargs):
super(TestSigmaPlugin, self).__init__(*args, **kwargs)
self.test_index = 'test_index'
# Mock the Elasticsearch datastore.
@mock.patch(
'timesketch.lib.analyzers.interface.ElasticsearchDataStore',
MockDataStore)
def test_analyzer(self):
"""Test analyzer."""
# TODO: Add more tests
_ = sigma_tagger.LinuxRulesSigmaPlugin(
sketch_id=1, index_name=self.test_index)
| 27.517241
| 68
| 0.705514
|
4a060a8d352289575fd1cc751e41166c3de46ade
| 6,470
|
py
|
Python
|
src/main.py
|
MayD524/May-s-2D-Adventure
|
4038b1f2fcbddca9ae526fc7ba5e3e3a98d65a7a
|
[
"MIT"
] | null | null | null |
src/main.py
|
MayD524/May-s-2D-Adventure
|
4038b1f2fcbddca9ae526fc7ba5e3e3a98d65a7a
|
[
"MIT"
] | null | null | null |
src/main.py
|
MayD524/May-s-2D-Adventure
|
4038b1f2fcbddca9ae526fc7ba5e3e3a98d65a7a
|
[
"MIT"
] | null | null | null |
from game_files.projectile import mayProjectile
from game_files.gameHandler import GameHandler
from game_files.collectables import *
from game_files.gameConsts import *
from game_files.floor import mayFloor
from game_files.player import player
from game_files.npc import mayNPC
import pyxel
import json
class App(GameHandler):
def __init__(self):
GameHandler.__init__(self)
pyxel.init(208, 160, caption="Pyxel Game", fps=DEFAULT_FPS)
pyxel.load("my_resource.pyxres")
self.scene = SCENE_PLAYING
## i may change this later
with open("./levels/level_selector.json") as f:
self.level_selc = json.load(f)
self.load_level("level_1")
#self.game_Init()
pyxel.run(self.update, self.draw)
def get_level_path(self, level:str) -> str:
return self.level_selc[level]
def load_level(self, level:str) -> None:
with open(self.level_selc[level],'rb') as f:
byte_list = f.read()
start_x = 0
start_y = 0
prev_byte = 0
prev_cnt = 8
## 20 x 25
cur_x = 0
cur_y = 0
for (i, byte) in enumerate(byte_list):
if byte == prev_byte and i != len(byte_list) - 1:
prev_cnt += 8
else:
if prev_byte == 20: ## Master Floor
floor = mayFloor(start_x, start_y, prev_cnt+8, 8)
floor.name = "master_floor"
self.gameObjects.append(floor)
elif prev_byte == 34: ## Grass Floor
floor = mayFloor(start_x, start_y, prev_cnt+8, 8)
floor.name = "grass_floor_generic"
self.gameObjects.append(floor)
elif prev_byte == 143: ## Anti Floor
floor = mayFloor(start_x, start_y, prev_cnt+8, 8)
floor.name = "grass_floor_anti"
floor.isInverted = True
self.gameObjects.append(floor)
elif prev_byte == 240: ## Player
self.player = player(start_x, start_y, TILEOFFSET + 1, TILEOFFSET + 1, p_health=PLAYER_DEFAULT_HEALTH)
elif prev_byte == 201 or prev_byte == 85: ## NPC Spawn
npc = mayNPC(start_x, start_y, 8, 8, 100, .4, NPC_SIMPLE_ENEMY if prev_byte == 201 else NPC_RANGED_ENEMY ,"enemy-npc-1")
self.gameObjects.append(npc)
elif prev_byte == 154: ## Coin
self.gameObjects.append(mayCoin(start_x, start_y))
elif prev_byte == 148: ## Health Kit
self.gameObjects.append(mayHealthKit(start_x, start_y))
prev_byte = byte
prev_cnt = 8
start_x = cur_x
start_y = cur_y
cur_x += 8
if cur_x >= 208:
cur_y += 8
cur_x = 0
## made a separate function so that we can call it later
def game_Init(self) -> None:
self.gameObjects = []
self.scene = SCENE_PLAYING
self.score = 0
self.pHealth = PLAYER_DEFAULT_HEALTH
gameFloor = mayFloor(0, pyxel.height - 20, 208, 20)
gameFloor.name = "master_floor"
testFloor = mayFloor(10, pyxel.height - 28, 48, 8)
test2Floor = mayFloor(60, pyxel.height - 50, 48, 8)
test3Floor = mayFloor(110, 30, 16, 8)
self.gameObjects.append(mayCoin(60, 120))
self.gameObjects.append(mayHealthKit(80, 120))
test2Floor.imgID = 1
test2Floor.name = "test2_floor"
testFloor.name = "test_floor"
testFloor.imgID = 1
self.gameObjects.append(gameFloor)
self.gameObjects.append(testFloor)
self.gameObjects.append(test2Floor)
self.gameObjects.append(test3Floor)
#npc = mayNPC(30, pyxel.height - 50, 8, 8, 100, .4, NPC_SIMPLE_ENEMY ,"enemy-npc-1")
#self.gameObjects.append(npc)
npc = mayNPC(60, pyxel.height - 50, 8, 8, 100, .4, NPC_RANGED_ENEMY ,"enemy-npc-1")
self.gameObjects.append(npc)
self.player = player(pyxel.width / 2, pyxel.height - 40, TILEOFFSET + 1, TILEOFFSET + 1, p_health=self.pHealth)
def update(self) -> None:
if pyxel.btnp(pyxel.KEY_Q):
pyxel.quit()
if pyxel.btnp(pyxel.KEY_R):
proj = mayProjectile(0, pyxel.height - 50, 10, 5, 6, 10, 1, 10)
self.gameObjects.append(proj)
if self.scene == SCENE_PLAYING:
self.update_play()
elif self.scene == SCENE_END:
self.update_gameOver()
def draw(self) -> None:
pyxel.cls(0)
if self.scene == SCENE_PLAYING:
self.draw_play_scene()
elif self.scene == SCENE_END:
self.draw_game_over()
def update_gameOver(self) -> None:
if pyxel.btnp(pyxel.KEY_Q):
pyxel.quit()
if pyxel.btnp(pyxel.KEY_ENTER):
self.game_Init()
def update_play(self) -> None:
self.player._update()
self.score = self.player.score
self.pHealth = self.player.health
if self.pHealth <= 0:
self.scene = SCENE_END
self.check_collision(self.player)
self.updateList(self.gameObjects)
self._cleanup()
def draw_game_over(self) -> None:
pyxel.text(pyxel.width / 2, pyxel.height / 2, "GAME OVER", 7)
pyxel.text(pyxel.width / 2, (pyxel.height / 2) + 8, f"Score: {self.score}", 6)
def draw_play_scene(self) -> None:
self.player._draw()
pyxel.text(0, 0, f"Score: {self.score}", 6)
pyxel.text(0, 8, f"Health: {self.pHealth}", 6)
self.drawList(self.gameObjects)
if __name__ == "__main__":
App()
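# --- Illustrative sketch of the run-length grouping in load_level(); not part of the game code ---
# load_level() walks the level bytes and merges consecutive equal tiles into one
# wider floor object. The helper below shows the same grouping idea on a plain
# list of tile ids using itertools.groupby; the tile ids are made-up values and
# the 8-pixel tile width mirrors the loader above.
def group_tiles(tile_ids):
    from itertools import groupby
    runs, x = [], 0
    for tile, run in groupby(tile_ids):
        width = 8 * len(list(run))
        runs.append((tile, x, width))   # (tile id, start x, pixel width)
        x += width
    return runs

# group_tiles([20, 20, 20, 34, 34, 0]) -> [(20, 0, 24), (34, 24, 16), (0, 40, 8)]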
| 36.145251
| 142
| 0.510819
|
4a060b631c8c4a94a492929f476c36d24069d91e
| 15,479
|
py
|
Python
|
tardis/plasma/base.py
|
bartnikm/tardis-bartnikm
|
2b0f3110fefd6740349ca7b33fe72bf025c88452
|
[
"BSD-3-Clause"
] | null | null | null |
tardis/plasma/base.py
|
bartnikm/tardis-bartnikm
|
2b0f3110fefd6740349ca7b33fe72bf025c88452
|
[
"BSD-3-Clause"
] | null | null | null |
tardis/plasma/base.py
|
bartnikm/tardis-bartnikm
|
2b0f3110fefd6740349ca7b33fe72bf025c88452
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import re
import logging
import tempfile
import fileinput
import networkx as nx
from tardis.plasma.exceptions import PlasmaMissingModule, NotInitializedModule
from tardis.plasma.properties.base import *
from tardis.io.util import PlasmaWriterMixin
logger = logging.getLogger(__name__)
class BasePlasma(PlasmaWriterMixin):
outputs_dict = {}
hdf_name = "plasma"
def __init__(self, plasma_properties, property_kwargs=None, **kwargs):
self.outputs_dict = {}
self.input_properties = []
self.plasma_properties = self._init_properties(
plasma_properties, property_kwargs, **kwargs
)
self._build_graph()
self.update(**kwargs)
def __getattr__(self, item):
if item in self.outputs_dict:
return self.get_value(item)
else:
return super(BasePlasma, self).__getattribute__(item)
def __setattr__(self, key, value):
if key != "module_dict" and key in self.outputs_dict:
raise AttributeError(
"Plasma inputs can only be updated using " "the 'update' method"
)
else:
super(BasePlasma, self).__setattr__(key, value)
def __dir__(self):
attrs = [item for item in self.__dict__ if not item.startswith("_")]
attrs += [
item for item in self.__class__.__dict__ if not item.startswith("_")
]
attrs += self.outputs_dict.keys()
return attrs
@property
def plasma_properties_dict(self):
return {item.name: item for item in self.plasma_properties}
def get_value(self, item):
return getattr(self.outputs_dict[item], item)
def _build_graph(self):
"""
Builds the directed Graph using network X
:param plasma_modules:
:return:
"""
self.graph = nx.DiGraph()
# Adding all nodes
self.graph.add_nodes_from(
[
(plasma_property.name, {})
for plasma_property in self.plasma_properties
]
)
# Flagging all input modules
self.input_properties = [
item
for item in self.plasma_properties
if not hasattr(item, "inputs")
]
for plasma_property in self.plasma_properties:
# Skipping any module that is an input module
if plasma_property in self.input_properties:
continue
for input in plasma_property.inputs:
if input not in self.outputs_dict:
raise PlasmaMissingModule(
f"Module {plasma_property.name} requires input "
f"{input} which has not been added"
f" to this plasma"
)
try:
position = self.outputs_dict[input].outputs.index(input)
label = self.outputs_dict[input].latex_name[position]
label = "$" + label + "$"
label = label.replace("\\", "\\\\")
except:
label = input.replace("_", "-")
self.graph.add_edge(
self.outputs_dict[input].name,
plasma_property.name,
label=label,
)
def _init_properties(
self, plasma_properties, property_kwargs=None, **kwargs
):
"""
Builds a dictionary with the plasma module names as keys
Parameters
----------
plasma_modules : list
list of Plasma properties
property_kwargs : dict
dict of plasma module : kwargs pairs. kwargs should be a dict
of arguments that will be passed to the __init__ method of
the respective plasma module.
kwargs : dictionary
input values for input properties. For example,
t_rad=[5000, 6000,], j_blues=[..]
"""
if property_kwargs is None:
property_kwargs = {}
plasma_property_objects = []
self.previous_iteration_properties = []
self.outputs_dict = {}
for plasma_property in plasma_properties:
if issubclass(plasma_property, PreviousIterationProperty):
current_property_object = plasma_property(
**property_kwargs.get(plasma_property, {})
)
current_property_object.set_initial_value(kwargs)
self.previous_iteration_properties.append(
current_property_object
)
elif issubclass(plasma_property, Input):
if not set(kwargs.keys()).issuperset(plasma_property.outputs):
missing_input_values = set(plasma_property.outputs) - set(
kwargs.keys()
)
raise NotInitializedModule(
f"Input {missing_input_values} required for "
f"plasma but not given when "
f"instantiating the "
f"plasma"
)
current_property_object = plasma_property(
**property_kwargs.get(plasma_property, {})
)
else:
current_property_object = plasma_property(
self, **property_kwargs.get(plasma_property, {})
)
for output in plasma_property.outputs:
self.outputs_dict[output] = current_property_object
plasma_property_objects.append(current_property_object)
return plasma_property_objects
def store_previous_properties(self):
for property in self.previous_iteration_properties:
p = property.outputs[0]
self.outputs_dict[p].set_value(
self.get_value(re.sub(r"^previous_", "", p))
)
def update(self, **kwargs):
for key in kwargs:
if key not in self.outputs_dict:
raise PlasmaMissingModule(
f"Trying to update property {key}" f" that is unavailable"
)
self.outputs_dict[key].set_value(kwargs[key])
for module_name in self._resolve_update_list(kwargs.keys()):
self.plasma_properties_dict[module_name].update()
def freeze(self, *args):
"""
Freeze plasma properties.
This method freezes plasma properties to prevent them from being
updated: the values of a frozen property are fixed in the plasma
calculation. This is useful for example for setting up test cases.
Parameters
----------
args : iterable of str
Names of plasma properties to freeze.
Examples
--------
>>> plasma.freeze('t_electrons')
"""
for key in args:
if key not in self.outputs_dict:
raise PlasmaMissingModule(
"Trying to freeze property {0}"
" that is unavailable".format(key)
)
self.outputs_dict[key].frozen = True
def thaw(self, *args):
"""
Thaw plasma properties.
This method thaws (unfreezes) plasma properties allowing them to be
updated again.
Parameters
----------
args : iterable of str
Names of plasma properties to unfreeze.
Examples
--------
>>> plasma.thaw('t_electrons')
"""
for key in args:
if key not in self.outputs_dict:
raise PlasmaMissingModule(
"Trying to thaw property {0}"
" that is unavailable".format(key)
)
self.outputs_dict[key].frozen = False
def _update_module_type_str(self):
for node in self.graph:
self.outputs_dict[node]._update_type_str()
def _resolve_update_list(self, changed_properties):
"""
Returns a list of all plasma models which are affected by the
changed_modules due to there dependency in the
the plasma_graph.
Parameters
----------
changed_modules : list
all modules changed in the plasma
Returns
-------
: list
all affected modules.
"""
descendants_ob = []
for plasma_property in changed_properties:
node_name = self.outputs_dict[plasma_property].name
descendants_ob += nx.descendants(self.graph, node_name)
descendants_ob = list(set(descendants_ob))
sort_order = list(nx.topological_sort(self.graph))
descendants_ob.sort(key=lambda val: sort_order.index(val))
logger.debug(
f"Updating modules in the following order:"
f'{"->".join(descendants_ob)}'
)
return descendants_ob
def write_to_dot(self, fname, args=None, latex_label=True):
"""
This method takes the NetworkX Graph generated from the _build_graph
method, converts it into a DOT code, and saves it to a file
Parameters
----------
fname: str
the name of the file the graph will be saved to
args: list
a list of optional settings for displaying the
graph written in DOT format
latex_label: boolean
enables/disables writing LaTeX equations and
edge labels into the file.
"""
try:
import pygraphviz
except:
logger.warn(
"pygraphviz missing. Plasma graph will not be " "generated."
)
return
print_graph = self.graph.copy()
print_graph = self.remove_hidden_properties(print_graph)
for node in print_graph:
if latex_label:
if hasattr(self.plasma_properties_dict[node], "latex_formula"):
print_graph.nodes[str(node)][
"label"
] = f"\\\\textrm{{{node}: }}"
node_list = self.plasma_properties_dict[node]
formulae = node_list.latex_formula
for output in range(0, len(formulae)):
formula = formulae[output]
label = formula.replace("\\", "\\\\")
print_graph.nodes[str(node)]["label"] += label
else:
print_graph.nodes[str(node)][
"label"
] = f"\\\\textrm{{{node}}}"
else:
print_graph.nodes[str(node)]["label"] = node
for edge in print_graph.edges:
label = print_graph.edges[edge]["label"]
print_graph.edges[edge]["label"] = "-"
print_graph.edges[edge]["texlbl"] = label
nx.drawing.nx_agraph.write_dot(print_graph, fname)
for line in fileinput.FileInput(fname, inplace=1):
if latex_label:
print(
line.replace(
r'node [label="\N"]',
r'node [texmode="math"]',
),
end="",
)
else:
print(
line.replace(
r'node [label="\N"];',
"",
),
end="",
)
if args is not None:
with open(fname, "r") as file:
lines = file.readlines()
for newline in args:
lines.insert(1, f"\t{newline};\n")
with open(fname, "w") as f:
lines = "".join(lines)
f.write(lines)
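    # Possible usage (assumes pygraphviz is installed; the file name and DOT
    # statement are arbitrary examples):
    #   plasma.write_to_dot('plasma_graph.dot',
    #                       args=['rankdir=LR'], latex_label=False)
    # Each entry in `args` is inserted verbatim as a DOT statement near the top
    # of the generated file.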
def write_to_tex(self, fname_graph, scale=0.5, args=None, latex_label=True):
"""
        This method takes the NetworkX graph generated by the _build_graph
        method, converts it into a LaTeX-friendly format,
        and saves it to a file.
Parameters
----------
fname_graph: str
the name of the file the graph will be saved to
args: list
a list of optional settings for displaying the
graph written in DOT format
scale: float
a scaling factor to expand/contract the generated
graph
latex_label: boolean
enables/disables writing LaTeX equations and
edge labels into the file.
"""
try:
import dot2tex
        except ImportError:
            logger.warning(
                "dot2tex missing. Plasma graph will not be generated."
            )
return
temp_fname = tempfile.NamedTemporaryFile().name
self.write_to_dot(temp_fname, args=args, latex_label=latex_label)
with open(temp_fname, "r") as file:
dot_string = file.read().replace("\\\\", "\\")
texcode = dot2tex.dot2tex(
dot_string, format="tikz", crop=True, valignmode="dot"
)
with open(fname_graph, "w") as file:
file.write(texcode)
for line in fileinput.input(fname_graph, inplace=1):
print(
line.replace(
r"\documentclass{article}",
r"\documentclass[class=minimal,border=20pt]{standalone}",
),
end="",
)
for line in fileinput.input(fname_graph, inplace=1):
print(line.replace(r"\enlargethispage{100cm}", ""), end="")
for line in fileinput.input(fname_graph, inplace=1):
print(
line.replace(
r"\begin{tikzpicture}[>=latex',line join=bevel,]",
r"\begin{tikzpicture}"
r"[>=latex',line join=bevel,"
rf"scale={scale}]",
),
end="",
)
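    # Possible usage (assumes dot2tex and pygraphviz are available; the file
    # name is an arbitrary example):
    #   plasma.write_to_tex('plasma_graph.tex', scale=0.5)
    # The result is a standalone TikZ document that can usually be compiled
    # directly with pdflatex.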
def remove_hidden_properties(self, print_graph):
for item in self.plasma_properties_dict.values():
module = self.plasma_properties_dict[item.name].__class__
if issubclass(module, HiddenPlasmaProperty):
output = module.outputs[0]
for value in self.plasma_properties_dict.keys():
if output in getattr(
self.plasma_properties_dict[value], "inputs", []
):
for input in self.plasma_properties_dict[
item.name
].inputs:
try:
position = self.outputs_dict[
input
].outputs.index(input)
label = self.outputs_dict[input].latex_name[
position
]
label = "$" + label + "$"
label = label.replace("\\", "\\\\")
                            except (KeyError, ValueError, IndexError, AttributeError):
label = input.replace("_", "-")
self.graph.add_edge(
self.outputs_dict[input].name,
value,
label=label,
)
print_graph.remove_node(str(item.name))
return print_graph
| avg_line_length: 34.551339 | max_line_length: 80 | alphanum_fraction: 0.520964 |

hexsha: 4a060be1d633c688810302c28abf45c39d401eb0 | size: 2016 | ext: py | lang: Python
max_stars_repo_path: src/federator-draft/test_federator_metrics.py | max_stars_repo_name: TaoHaoTian/federated-recommender-system | max_stars_repo_head_hexsha: 65a151238e1a419fc713d26fa11ecfe4536d94ee | max_stars_repo_licenses: ["MIT"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2020-10-18T13:45:33.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-14T13:01:52.000Z
max_issues_repo_path: src/federator-draft/test_federator_metrics.py | max_issues_repo_name: TaoHaoTian/federated-recommender-system | max_issues_repo_head_hexsha: 65a151238e1a419fc713d26fa11ecfe4536d94ee | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2019-09-23T22:08:57.000Z | max_issues_repo_issues_event_max_datetime: 2019-09-23T22:08:57.000Z
max_forks_repo_path: src/federator-draft/test_federator_metrics.py | max_forks_repo_name: TaoHaoTian/federated-recommender-system | max_forks_repo_head_hexsha: 65a151238e1a419fc713d26fa11ecfe4536d94ee | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2020-09-08T08:18:03.000Z | max_forks_repo_forks_event_max_datetime: 2021-02-22T02:53:25.000Z
content:
from lightfm_alg import LightFMAlg
from surprise_svd import SurpriseSVD
from data_handler import DataHandler
from definitions import ROOT_DIR
import helpers
import matplotlib.pyplot as plt
def test_alg_times():
dh = DataHandler(filename=ROOT_DIR + "/datasets/ml-25m/ratings.csv")
dh.dataset = dh.sort_dataset_randomly()
# Test benchmark times
ratings_sizes = [100, 1000, 10000, 100000, 1000000, 10000000]
for i in ratings_sizes:
ds = dh.dataset[:i]
user = ds[:, 0][0]
#lfm = LightFMAlg(ds=dh.dataset, labels_ds="/datasets/ml-latest-small/movies.csv")
#lfm.generate_rec(user)
filename = "test_metrics_%d" % i
svd = SurpriseSVD(ds=ds, sl_filename=filename,
movies_filename="/datasets/ml-25m/movies.csv")
svd.get_top_n(user)
def plot_thresholds(rating_threshold, ratings, users, items):
fig = plt.figure()
fig.set_size_inches(6.4, 2.4)
ax = plt.subplot(111)
plt.title("Users and Movies with Ratings Above Threshold")
plt.ylabel("Users/Items Above Threshold")
plt.xlabel("Rating Threshold")
#ax.plot(rating_threshold, ratings, label="total ratings")
ax.plot(rating_threshold, users, label="users")
ax.plot(rating_threshold, items, label="movies")
ax.legend()
# Put a legend below current axis
save_filename = "zipfs_law.pdf"
fig.savefig(save_filename, format="pdf", bbox_inches='tight')
fig.show()
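# Hedged example of calling plot_thresholds directly with made-up numbers,
# mirroring what calculate_thresholds() passes in below:
#   plot_thresholds([0, 10, 25],
#                   [25000000, 20000000, 15000000],
#                   [162000, 90000, 40000],
#                   [59000, 30000, 12000])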
def calculate_thresholds():
dh = DataHandler(filename=ROOT_DIR + "/datasets/ml-25m/ratings.csv")
thresholds = [0, 10, 25, 50, 100, 250, 500, 750, 1000]
ratings = []
users = []
items = []
for t in thresholds:
_, _, results = helpers.remove_below_threshold_user_and_items(dh.dataset, u_thresh=t, i_thresh=t)
ratings.append(results[1])
users.append(results[3])
items.append(results[5])
plot_thresholds(thresholds, ratings, users, items)
"""
Run methods
"""
#test_alg_times()
calculate_thresholds()
| avg_line_length: 32.516129 | max_line_length: 105 | alphanum_fraction: 0.679563 |

hexsha: 4a060cce92c56f3c8f8a4e3dc340246bff20659b | size: 66516 | ext: py | lang: Python
max_stars_repo_path: cinder/tests/unit/volume/drivers/test_nfs.py | max_stars_repo_name: mail2nsrajesh/cinder | max_stars_repo_head_hexsha: a688b872bec6d1abd4dcd852bdb8e8a921369d2e | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: cinder/tests/unit/volume/drivers/test_nfs.py | max_issues_repo_name: mail2nsrajesh/cinder | max_issues_repo_head_hexsha: a688b872bec6d1abd4dcd852bdb8e8a921369d2e | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2018-10-25T13:04:01.000Z | max_issues_repo_issues_event_max_datetime: 2019-08-17T13:15:24.000Z
max_forks_repo_path: cinder/tests/unit/volume/drivers/test_nfs.py | max_forks_repo_name: mail2nsrajesh/cinder | max_forks_repo_head_hexsha: a688b872bec6d1abd4dcd852bdb8e8a921369d2e | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2018-10-17T13:32:50.000Z | max_forks_repo_forks_event_max_datetime: 2018-11-08T08:39:39.000Z
content:
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NFS driver module."""
import ddt
import errno
import os
import six
import uuid
import mock
from oslo_utils import imageutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.volume import configuration as conf
from cinder.volume.drivers import nfs
from cinder.volume.drivers import remotefs
class RemoteFsDriverTestCase(test.TestCase):
TEST_FILE_NAME = 'test.txt'
TEST_EXPORT = 'nas-host1:/export'
TEST_MNT_POINT = '/mnt/nas'
def setUp(self):
super(RemoteFsDriverTestCase, self).setUp()
self._driver = remotefs.RemoteFSDriver()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.append_config_values(mock.ANY)
self.configuration.nas_secure_file_permissions = 'false'
self.configuration.nas_secure_file_operations = 'false'
self.configuration.nfs_snapshot_support = True
self.configuration.max_over_subscription_ratio = 1.0
self.configuration.reserved_percentage = 5
self._driver = remotefs.RemoteFSDriver(
configuration=self.configuration)
mock_exc = mock.patch.object(self._driver, '_execute')
self._execute = mock_exc.start()
self.addCleanup(mock_exc.stop)
def test_create_sparsed_file(self):
self._driver._create_sparsed_file('/path', 1)
self._execute.assert_called_once_with('truncate', '-s', '1G',
'/path', run_as_root=True)
def test_create_regular_file(self):
self._driver._create_regular_file('/path', 1)
self._execute.assert_called_once_with('dd', 'if=/dev/zero',
'of=/path', 'bs=1M',
'count=1024', run_as_root=True)
def test_create_qcow2_file(self):
file_size = 1
self._driver._create_qcow2_file('/path', file_size)
self._execute.assert_called_once_with('qemu-img', 'create', '-f',
'qcow2', '-o',
'preallocation=metadata',
'/path', '%s' %
str(file_size * units.Gi),
run_as_root=True)
def test_set_rw_permissions_for_all(self):
self._driver._set_rw_permissions_for_all('/path')
self._execute.assert_called_once_with('chmod', 'ugo+rw', '/path',
run_as_root=True)
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_with_secure_file_permissions(self, LOG):
self._driver._mounted_shares = [self.TEST_EXPORT]
self.configuration.nas_secure_file_permissions = 'true'
self._driver._set_rw_permissions(self.TEST_FILE_NAME)
self.assertFalse(LOG.warning.called)
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_without_secure_file_permissions(self, LOG):
self.configuration.nas_secure_file_permissions = 'false'
self._driver._set_rw_permissions(self.TEST_FILE_NAME)
self.assertTrue(LOG.warning.called)
warn_msg = "%(path)s is being set with open permissions: %(perm)s"
LOG.warning.assert_called_once_with(
warn_msg, {'path': self.TEST_FILE_NAME, 'perm': 'ugo+rw'})
@mock.patch('os.path.join')
@mock.patch('os.path.isfile', return_value=False)
def test_determine_nas_security_options_when_auto_and_new_install(
self,
mock_isfile,
mock_join):
"""Test the setting of the NAS Security Option
        In this test case, we will create the marker file. No pre-existing
        Cinder volumes are found during bootup.
"""
self._driver._mounted_shares = [self.TEST_EXPORT]
file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
is_new_install = True
self._driver._ensure_shares_mounted = mock.Mock()
nas_mount = self._driver._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
mock_join.return_value = file_path
secure_file_permissions = 'auto'
nas_option = self._driver._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
secure_file_operations = 'auto'
nas_option = self._driver._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
@mock.patch('os.path.join')
@mock.patch('os.path.isfile')
def test_determine_nas_security_options_when_auto_and_new_install_exists(
self,
isfile,
join):
"""Test the setting of the NAS Security Option
        In this test case, the marker file already exists. Cinder volumes
        are found during bootup.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
join.return_value = file_path
isfile.return_value = True
secure_file_permissions = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
secure_file_operations = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
@mock.patch('os.path.join')
@mock.patch('os.path.isfile')
def test_determine_nas_security_options_when_auto_and_old_install(self,
isfile,
join):
"""Test the setting of the NAS Security Option
In this test case, the marker file does not exist. There are also
pre-existing Cinder volumes.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
join.return_value = file_path
isfile.return_value = False
secure_file_permissions = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
secure_file_operations = 'auto'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
def test_determine_nas_security_options_when_admin_set_true(self):
"""Test the setting of the NAS Security Option
In this test case, the Admin set the flag to 'true'.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
secure_file_permissions = 'true'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
secure_file_operations = 'true'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('true', nas_option)
def test_determine_nas_security_options_when_admin_set_false(self):
"""Test the setting of the NAS Security Option
In this test case, the Admin set the flag to 'false'.
"""
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT]
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
nas_mount = drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
secure_file_permissions = 'false'
nas_option = drv._determine_nas_security_option_setting(
secure_file_permissions,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
secure_file_operations = 'false'
nas_option = drv._determine_nas_security_option_setting(
secure_file_operations,
nas_mount, is_new_install)
self.assertEqual('false', nas_option)
@mock.patch.object(remotefs, 'LOG')
def test_set_nas_security_options(self, LOG):
"""Test setting of NAS Security options.
The RemoteFS driver will force set options to false. The derived
objects will provide an inherited interface to properly set options.
"""
drv = self._driver
is_new_install = False
drv.set_nas_security_options(is_new_install)
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
self.assertEqual('false',
drv.configuration.nas_secure_file_permissions)
self.assertTrue(LOG.warning.called)
def test_secure_file_operations_enabled_true(self):
"""Test nas_secure_file_operations = 'true'
Networked file system based drivers may support secure file
operations. This test verifies the settings when secure.
"""
drv = self._driver
self.configuration.nas_secure_file_operations = 'true'
ret_flag = drv.secure_file_operations_enabled()
self.assertTrue(ret_flag)
def test_secure_file_operations_enabled_false(self):
"""Test nas_secure_file_operations = 'false'
Networked file system based drivers may support secure file
operations. This test verifies the settings when not secure.
"""
drv = self._driver
self.configuration.nas_secure_file_operations = 'false'
ret_flag = drv.secure_file_operations_enabled()
self.assertFalse(ret_flag)
# NFS configuration scenarios
NFS_CONFIG1 = {'max_over_subscription_ratio': 1.0,
'reserved_percentage': 0,
'nfs_sparsed_volumes': True,
'nfs_qcow2_volumes': False,
'nas_secure_file_permissions': 'false',
'nas_secure_file_operations': 'false'}
NFS_CONFIG2 = {'max_over_subscription_ratio': 10.0,
'reserved_percentage': 5,
'nfs_sparsed_volumes': False,
'nfs_qcow2_volumes': True,
'nas_secure_file_permissions': 'true',
'nas_secure_file_operations': 'true'}
NFS_CONFIG3 = {'max_over_subscription_ratio': 15.0,
'reserved_percentage': 10,
'nfs_sparsed_volumes': False,
'nfs_qcow2_volumes': False,
'nas_secure_file_permissions': 'auto',
'nas_secure_file_operations': 'auto'}
NFS_CONFIG4 = {'max_over_subscription_ratio': 20.0,
'reserved_percentage': 60,
'nfs_sparsed_volumes': True,
'nfs_qcow2_volumes': True,
'nas_secure_file_permissions': 'false',
'nas_secure_file_operations': 'true'}
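# Each NFS_CONFIG dict above is fed to _set_driver(extra_confs=...) by the
# ddt-parameterized tests below, e.g.
#   @ddt.data(NFS_CONFIG1, NFS_CONFIG2, NFS_CONFIG3, NFS_CONFIG4)
# so every scenario exercises the same test body with a different driver
# configuration.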
QEMU_IMG_INFO_OUT1 = """image: %(volid)s
file format: raw
virtual size: %(size_gb)sG (%(size_b)s bytes)
disk size: 173K
"""
QEMU_IMG_INFO_OUT2 = """image: %(volid)s
file format: qcow2
virtual size: %(size_gb)sG (%(size_b)s bytes)
disk size: 196K
cluster_size: 65536
Format specific information:
compat: 1.1
lazy refcounts: false
refcount bits: 16
corrupt: false
"""
QEMU_IMG_INFO_OUT3 = """image: volume-%(volid)s.%(snapid)s
file format: qcow2
virtual size: %(size_gb)sG (%(size_b)s bytes)
disk size: 196K
cluster_size: 65536
backing file: volume-%(volid)s
backing file format: qcow2
Format specific information:
compat: 1.1
lazy refcounts: false
refcount bits: 16
corrupt: false
"""
QEMU_IMG_INFO_OUT4 = """image: volume-%(volid)s.%(snapid)s
file format: raw
virtual size: %(size_gb)sG (%(size_b)s bytes)
disk size: 196K
cluster_size: 65536
backing file: volume-%(volid)s
backing file format: raw
Format specific information:
compat: 1.1
lazy refcounts: false
refcount bits: 16
corrupt: false
"""
@ddt.ddt
class NfsDriverTestCase(test.TestCase):
"""Test case for NFS driver."""
TEST_NFS_HOST = 'nfs-host1'
TEST_NFS_SHARE_PATH = '/export'
TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH)
TEST_NFS_EXPORT2 = 'nfs-host2:/export'
TEST_NFS_EXPORT2_OPTIONS = '-o intr'
TEST_SIZE_IN_GB = 1
TEST_MNT_POINT = '/mnt/nfs'
TEST_MNT_POINT_BASE_EXTRA_SLASH = '/opt/stack/data/cinder//mnt'
TEST_MNT_POINT_BASE = '/mnt/test'
TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
TEST_FILE_NAME = 'test.txt'
TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf'
TEST_NFS_EXPORT_SPACES = 'nfs-host3:/export this'
TEST_MNT_POINT_SPACES = '/ 0 0 0 /foo'
VOLUME_UUID = '69ad4ff6-b892-4215-aaaa-aaaaaaaaaaaa'
def setUp(self):
super(NfsDriverTestCase, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.append_config_values(mock.ANY)
self.configuration.max_over_subscription_ratio = 1.0
self.configuration.reserved_percentage = 5
self.configuration.nfs_shares_config = None
self.configuration.nfs_sparsed_volumes = True
self.configuration.nfs_reserved_percentage = 5.0
self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
self.configuration.nfs_mount_options = None
self.configuration.nfs_mount_attempts = 3
self.configuration.nfs_qcow2_volumes = False
self.configuration.nas_secure_file_permissions = 'false'
self.configuration.nas_secure_file_operations = 'false'
self.configuration.nas_host = None
self.configuration.nas_share_path = None
self.configuration.nas_mount_options = None
self.configuration.volume_dd_blocksize = '1M'
self.context = context.get_admin_context()
def _set_driver(self, extra_confs=None):
        # Override the default configs
if extra_confs:
for config_name, config_value in extra_confs.items():
setattr(self.configuration, config_name, config_value)
self._driver = nfs.NfsDriver(configuration=self.configuration)
self._driver.shares = {}
self.mock_object(self._driver, '_execute')
@ddt.data(NFS_CONFIG1, NFS_CONFIG2, NFS_CONFIG3, NFS_CONFIG4)
def test_local_path(self, nfs_config):
"""local_path common use case."""
self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
self._set_driver(extra_confs=nfs_config)
drv = self._driver
volume = fake_volume.fake_volume_obj(
self.context,
provider_location=self.TEST_NFS_EXPORT1)
self.assertEqual(
'/mnt/test/2f4f60214cf43c595666dd815f0360a4/%s' % volume.name,
drv.local_path(volume))
@ddt.data(NFS_CONFIG1, NFS_CONFIG2, NFS_CONFIG3, NFS_CONFIG4)
def test_copy_image_to_volume(self, nfs_config):
"""resize_image common case usage."""
mock_resize = self.mock_object(image_utils, 'resize_image')
mock_fetch = self.mock_object(image_utils, 'fetch_to_raw')
self._set_driver()
drv = self._driver
volume = fake_volume.fake_volume_obj(self.context,
size=self.TEST_SIZE_IN_GB)
test_img_source = 'volume-%s' % volume.id
self.mock_object(drv, 'local_path', return_value=test_img_source)
data = mock.Mock()
data.virtual_size = 1 * units.Gi
self.mock_object(image_utils, 'qemu_img_info', return_value=data)
drv.copy_image_to_volume(None, volume, None, None)
mock_fetch.assert_called_once_with(
None, None, None, test_img_source, mock.ANY, run_as_root=True,
size=self.TEST_SIZE_IN_GB)
mock_resize.assert_called_once_with(test_img_source,
self.TEST_SIZE_IN_GB,
run_as_root=True)
def test_get_mount_point_for_share(self):
"""_get_mount_point_for_share should calculate correct value."""
self._set_driver()
drv = self._driver
self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4',
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
def test_get_mount_point_for_share_given_extra_slash_in_state_path(self):
"""_get_mount_point_for_share should calculate correct value."""
# This test gets called with the extra slash
self.configuration.nfs_mount_point_base = (
self.TEST_MNT_POINT_BASE_EXTRA_SLASH)
# The driver gets called with the correct configuration and removes
# the extra slash
drv = nfs.NfsDriver(configuration=self.configuration)
self.assertEqual('/opt/stack/data/cinder/mnt', drv.base)
self.assertEqual(
'/opt/stack/data/cinder/mnt/2f4f60214cf43c595666dd815f0360a4',
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
def test_get_capacity_info(self):
"""_get_capacity_info should calculate correct value."""
self._set_driver()
drv = self._driver
stat_total_size = 2620544
stat_avail = 2129984
stat_output = '1 %d %d' % (stat_total_size, stat_avail)
du_used = 490560
du_output = '%d /mnt' % du_used
with mock.patch.object(
drv, '_get_mount_point_for_share') as mock_get_mount:
mock_get_mount.return_value = self.TEST_MNT_POINT
drv._execute.side_effect = [(stat_output, None),
(du_output, None)]
self.assertEqual((stat_total_size, stat_avail, du_used),
drv._get_capacity_info(self.TEST_NFS_EXPORT1))
mock_get_mount.assert_called_once_with(self.TEST_NFS_EXPORT1)
calls = [mock.call('stat', '-f', '-c', '%S %b %a',
self.TEST_MNT_POINT, run_as_root=True),
mock.call('du', '-sb', '--apparent-size',
'--exclude', '*snapshot*',
self.TEST_MNT_POINT, run_as_root=True)]
drv._execute.assert_has_calls(calls)
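    # Reading the fake output above (a sketch of the assumed driver behaviour,
    # not taken from nfs.py): 'stat -f -c "%S %b %a"' reports block size, total
    # blocks and available blocks, and 'du -sb' reports allocated bytes, so
    # with a block size of 1 the expected tuple is simply
    #   (1 * 2620544, 1 * 2129984, 490560).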
def test_get_capacity_info_for_share_and_mount_point_with_spaces(self):
"""_get_capacity_info should calculate correct value."""
self._set_driver()
drv = self._driver
stat_total_size = 2620544
stat_avail = 2129984
stat_output = '1 %d %d' % (stat_total_size, stat_avail)
du_used = 490560
du_output = '%d /mnt' % du_used
with mock.patch.object(
drv, '_get_mount_point_for_share') as mock_get_mount:
mock_get_mount.return_value = self.TEST_MNT_POINT_SPACES
drv._execute.side_effect = [(stat_output, None),
(du_output, None)]
self.assertEqual((stat_total_size, stat_avail, du_used),
drv._get_capacity_info(
self.TEST_NFS_EXPORT_SPACES))
mock_get_mount.assert_called_once_with(
self.TEST_NFS_EXPORT_SPACES)
calls = [mock.call('stat', '-f', '-c', '%S %b %a',
self.TEST_MNT_POINT_SPACES, run_as_root=True),
mock.call('du', '-sb', '--apparent-size',
'--exclude', '*snapshot*',
self.TEST_MNT_POINT_SPACES, run_as_root=True)]
drv._execute.assert_has_calls(calls)
def test_load_shares_config(self):
self._set_driver()
drv = self._driver
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
with mock.patch.object(
drv, '_read_config_file') as mock_read_config:
config_data = []
config_data.append(self.TEST_NFS_EXPORT1)
config_data.append('#' + self.TEST_NFS_EXPORT2)
config_data.append('')
config_data.append(self.TEST_NFS_EXPORT2 + ' ' +
self.TEST_NFS_EXPORT2_OPTIONS)
config_data.append('broken:share_format')
mock_read_config.return_value = config_data
drv._load_shares_config(drv.configuration.nfs_shares_config)
mock_read_config.assert_called_once_with(
self.TEST_SHARES_CONFIG_FILE)
self.assertIn(self.TEST_NFS_EXPORT1, drv.shares)
self.assertIn(self.TEST_NFS_EXPORT2, drv.shares)
self.assertEqual(2, len(drv.shares))
self.assertEqual(self.TEST_NFS_EXPORT2_OPTIONS,
drv.shares[self.TEST_NFS_EXPORT2])
def test_load_shares_config_nas_opts(self):
self._set_driver()
drv = self._driver
drv.configuration.nas_host = self.TEST_NFS_HOST
drv.configuration.nas_share_path = self.TEST_NFS_SHARE_PATH
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
drv._load_shares_config(drv.configuration.nfs_shares_config)
self.assertIn(self.TEST_NFS_EXPORT1, drv.shares)
self.assertEqual(1, len(drv.shares))
def test_ensure_shares_mounted_should_save_mounting_successfully(self):
"""_ensure_shares_mounted should save share if mounted with success."""
self._set_driver()
drv = self._driver
config_data = []
config_data.append(self.TEST_NFS_EXPORT1)
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
with mock.patch.object(
drv, '_read_config_file') as mock_read_config:
with mock.patch.object(
drv, '_ensure_share_mounted') as mock_ensure:
mock_read_config.return_value = config_data
                drv._ensure_shares_mounted()
                self.assertEqual(1, len(drv._mounted_shares))
                mock_ensure.assert_called_once_with(self.TEST_NFS_EXPORT1)
@mock.patch.object(remotefs, 'LOG')
def test_ensure_shares_mounted_should_not_save_mounting_with_error(self,
LOG):
"""_ensure_shares_mounted should not save share if failed to mount."""
self._set_driver()
drv = self._driver
config_data = []
config_data.append(self.TEST_NFS_EXPORT1)
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
with mock.patch.object(
drv, '_read_config_file') as mock_read_config:
with mock.patch.object(
drv, '_ensure_share_mounted') as mock_ensure:
mock_read_config.return_value = config_data
                mock_ensure.side_effect = Exception()
                drv._ensure_shares_mounted()
                self.assertEqual(0, len(drv._mounted_shares))
                mock_ensure.assert_called_once_with(self.TEST_NFS_EXPORT1)
def test_find_share_should_throw_error_if_there_is_no_mounted_share(self):
"""_find_share should throw error if there is no mounted shares."""
self._set_driver()
drv = self._driver
drv._mounted_shares = []
self.assertRaises(exception.NfsNoSharesMounted, drv._find_share,
self._simple_volume())
def test_find_share(self):
"""_find_share simple use case."""
self._set_driver()
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
volume = fake_volume.fake_volume_obj(self.context,
size=self.TEST_SIZE_IN_GB)
with mock.patch.object(
drv, '_get_capacity_info') as mock_get_capacity_info:
mock_get_capacity_info.side_effect = [
(5 * units.Gi, 2 * units.Gi, 2 * units.Gi),
(10 * units.Gi, 3 * units.Gi, 1 * units.Gi)]
self.assertEqual(self.TEST_NFS_EXPORT2,
drv._find_share(volume))
calls = [mock.call(self.TEST_NFS_EXPORT1),
mock.call(self.TEST_NFS_EXPORT2)]
mock_get_capacity_info.assert_has_calls(calls)
self.assertEqual(2, mock_get_capacity_info.call_count)
def test_find_share_should_throw_error_if_there_is_not_enough_space(self):
"""_find_share should throw error if there is no share to host vol."""
self._set_driver()
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
with mock.patch.object(
drv, '_get_capacity_info') as mock_get_capacity_info:
mock_get_capacity_info.side_effect = [
(5 * units.Gi, 0, 5 * units.Gi),
(10 * units.Gi, 0, 10 * units.Gi)]
self.assertRaises(exception.NfsNoSuitableShareFound,
drv._find_share, self._simple_volume())
calls = [mock.call(self.TEST_NFS_EXPORT1),
mock.call(self.TEST_NFS_EXPORT2)]
mock_get_capacity_info.assert_has_calls(calls)
self.assertEqual(2, mock_get_capacity_info.call_count)
def _simple_volume(self, size=10):
loc = self.TEST_NFS_EXPORT1
return fake_volume.fake_volume_obj(self.context,
display_name='volume_name',
provider_location=loc,
size=size)
def test_create_sparsed_volume(self):
self._set_driver()
drv = self._driver
volume = self._simple_volume()
self.override_config('nfs_sparsed_volumes', True)
with mock.patch.object(
drv, '_create_sparsed_file') as mock_create_sparsed_file:
with mock.patch.object(
drv, '_set_rw_permissions') as mock_set_rw_permissions:
drv._do_create_volume(volume)
mock_create_sparsed_file.assert_called_once_with(mock.ANY,
mock.ANY)
mock_set_rw_permissions.assert_called_once_with(mock.ANY)
def test_create_nonsparsed_volume(self):
self._set_driver()
drv = self._driver
self.configuration.nfs_sparsed_volumes = False
volume = self._simple_volume()
self.override_config('nfs_sparsed_volumes', False)
with mock.patch.object(
drv, '_create_regular_file') as mock_create_regular_file:
with mock.patch.object(
drv, '_set_rw_permissions') as mock_set_rw_permissions:
drv._do_create_volume(volume)
mock_create_regular_file.assert_called_once_with(mock.ANY,
mock.ANY)
mock_set_rw_permissions.assert_called_once_with(mock.ANY)
@mock.patch.object(nfs, 'LOG')
def test_create_volume_should_ensure_nfs_mounted(self, mock_log):
"""create_volume ensures shares provided in config are mounted."""
self._set_driver()
drv = self._driver
drv._find_share = mock.Mock()
drv._find_share.return_value = self.TEST_NFS_EXPORT1
drv._do_create_volume = mock.Mock()
with mock.patch.object(
drv, '_ensure_share_mounted') as mock_ensure_share:
drv._ensure_share_mounted()
volume = fake_volume.fake_volume_obj(self.context,
size=self.TEST_SIZE_IN_GB)
drv.create_volume(volume)
mock_ensure_share.assert_called_once_with()
@mock.patch.object(nfs, 'LOG')
def test_create_volume_should_return_provider_location(self, mock_log):
"""create_volume should return provider_location with found share."""
self._set_driver()
drv = self._driver
drv._ensure_shares_mounted = mock.Mock()
drv._do_create_volume = mock.Mock()
with mock.patch.object(drv, '_find_share') as mock_find_share:
mock_find_share.return_value = self.TEST_NFS_EXPORT1
volume = fake_volume.fake_volume_obj(self.context,
size=self.TEST_SIZE_IN_GB)
result = drv.create_volume(volume)
self.assertEqual(self.TEST_NFS_EXPORT1,
result['provider_location'])
mock_find_share.assert_called_once_with(volume)
def test_delete_volume(self):
"""delete_volume simple test case."""
self._set_driver()
drv = self._driver
drv._ensure_share_mounted = mock.Mock()
volume = fake_volume.fake_volume_obj(
self.context,
display_name='volume-123',
provider_location=self.TEST_NFS_EXPORT1)
with mock.patch.object(drv, 'local_path') as mock_local_path:
mock_local_path.return_value = self.TEST_LOCAL_PATH
drv.delete_volume(volume)
mock_local_path.assert_called_with(volume)
drv._execute.assert_called_once()
def test_delete_should_ensure_share_mounted(self):
"""delete_volume should ensure that corresponding share is mounted."""
self._set_driver()
drv = self._driver
volume = fake_volume.fake_volume_obj(
self.context,
display_name='volume-123',
provider_location=self.TEST_NFS_EXPORT1)
with mock.patch.object(drv, '_ensure_share_mounted'):
drv.delete_volume(volume)
def test_delete_should_not_delete_if_provider_location_not_provided(self):
"""delete_volume shouldn't delete if provider_location missed."""
self._set_driver()
drv = self._driver
volume = fake_volume.fake_volume_obj(self.context,
name='volume-123',
provider_location=None)
with mock.patch.object(drv, '_ensure_share_mounted'):
drv.delete_volume(volume)
self.assertFalse(drv._execute.called)
def test_get_volume_stats(self):
"""get_volume_stats must fill the correct values."""
self._set_driver()
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
with mock.patch.object(
drv, '_ensure_shares_mounted') as mock_ensure_share:
with mock.patch.object(
drv, '_get_capacity_info') as mock_get_capacity_info:
mock_get_capacity_info.side_effect = [
(10 * units.Gi, 2 * units.Gi, 2 * units.Gi),
(20 * units.Gi, 3 * units.Gi, 3 * units.Gi)]
drv._ensure_shares_mounted()
drv.get_volume_stats()
calls = [mock.call(self.TEST_NFS_EXPORT1),
mock.call(self.TEST_NFS_EXPORT2)]
mock_get_capacity_info.assert_has_calls(calls)
self.assertTrue(mock_ensure_share.called)
self.assertEqual(30.0, drv._stats['total_capacity_gb'])
self.assertEqual(5.0, drv._stats['free_capacity_gb'])
self.assertEqual(5, drv._stats['reserved_percentage'])
self.assertTrue(drv._stats['sparse_copy_volume'])
def test_get_volume_stats_with_non_zero_reserved_percentage(self):
"""get_volume_stats must fill the correct values."""
self.configuration.reserved_percentage = 10.0
drv = nfs.NfsDriver(configuration=self.configuration)
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
with mock.patch.object(
drv, '_ensure_shares_mounted') as mock_ensure_share:
with mock.patch.object(
drv, '_get_capacity_info') as mock_get_capacity_info:
mock_get_capacity_info.side_effect = [
(10 * units.Gi, 2 * units.Gi, 2 * units.Gi),
(20 * units.Gi, 3 * units.Gi, 3 * units.Gi)]
drv._ensure_shares_mounted()
drv.get_volume_stats()
calls = [mock.call(self.TEST_NFS_EXPORT1),
mock.call(self.TEST_NFS_EXPORT2)]
mock_get_capacity_info.assert_has_calls(calls)
self.assertTrue(mock_ensure_share.called)
self.assertEqual(30.0, drv._stats['total_capacity_gb'])
self.assertEqual(5.0, drv._stats['free_capacity_gb'])
self.assertEqual(10.0, drv._stats['reserved_percentage'])
@ddt.data(True, False)
def test_update_volume_stats(self, thin):
self._set_driver()
self._driver.configuration.max_over_subscription_ratio = 20.0
self._driver.configuration.reserved_percentage = 5.0
self._driver.configuration.nfs_sparsed_volumes = thin
remotefs_volume_stats = {
'volume_backend_name': 'fake_backend_name',
'vendor_name': 'fake_vendor',
'driver_version': 'fake_version',
'storage_protocol': 'NFS',
'total_capacity_gb': 100.0,
'free_capacity_gb': 20.0,
'reserved_percentage': 5.0,
'QoS_support': False,
}
self.mock_object(remotefs.RemoteFSDriver, '_update_volume_stats')
self._driver._stats = remotefs_volume_stats
mock_get_provisioned_capacity = self.mock_object(
self._driver, '_get_provisioned_capacity', return_value=25.0)
self._driver._update_volume_stats()
nfs_added_volume_stats = {
'provisioned_capacity_gb': 25.0 if thin else 80.0,
'max_over_subscription_ratio': 20.0,
'reserved_percentage': 5.0,
'thin_provisioning_support': thin,
'thick_provisioning_support': not thin,
}
expected = remotefs_volume_stats
expected.update(nfs_added_volume_stats)
self.assertEqual(expected, self._driver._stats)
self.assertEqual(thin, mock_get_provisioned_capacity.called)
def _check_is_share_eligible(self, total_size, total_available,
total_allocated, requested_volume_size):
self._set_driver()
with mock.patch.object(self._driver, '_get_capacity_info')\
as mock_get_capacity_info:
mock_get_capacity_info.return_value = (total_size,
total_available,
total_allocated)
return self._driver._is_share_eligible('fake_share',
requested_volume_size)
def test_is_share_eligible(self):
self._set_driver()
total_size = 100.0 * units.Gi
total_available = 90.0 * units.Gi
total_allocated = 10.0 * units.Gi
requested_volume_size = 1 # GiB
self.assertTrue(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_share_eligibility_with_reserved_percentage(self):
self._set_driver()
total_size = 100.0 * units.Gi
total_available = 4.0 * units.Gi
total_allocated = 96.0 * units.Gi
requested_volume_size = 1 # GiB
# Check used > used_ratio statement entered
self.assertFalse(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_is_share_eligible_above_oversub_ratio(self):
self._set_driver()
total_size = 100.0 * units.Gi
total_available = 10.0 * units.Gi
total_allocated = 90.0 * units.Gi
requested_volume_size = 10 # GiB
# Check apparent_available <= requested_volume_size statement entered
self.assertFalse(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_is_share_eligible_reserved_space_above_oversub_ratio(self):
self._set_driver()
total_size = 100.0 * units.Gi
total_available = 10.0 * units.Gi
total_allocated = 100.0 * units.Gi
requested_volume_size = 1 # GiB
# Check total_allocated / total_size >= oversub_ratio
# statement entered
self.assertFalse(self._check_is_share_eligible(total_size,
total_available,
total_allocated,
requested_volume_size))
def test_extend_volume(self):
"""Extend a volume by 1."""
self._set_driver()
drv = self._driver
volume = fake_volume.fake_volume_obj(
self.context,
id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10',
size=1,
provider_location='nfs_share')
path = 'path'
newSize = volume['size'] + 1
with mock.patch.object(image_utils, 'resize_image') as resize:
with mock.patch.object(drv, 'local_path', return_value=path):
with mock.patch.object(drv, '_is_share_eligible',
return_value=True):
with mock.patch.object(drv, '_is_file_size_equal',
return_value=True):
drv.extend_volume(volume, newSize)
resize.assert_called_once_with(path, newSize,
run_as_root=True)
def test_extend_volume_failure(self):
"""Error during extend operation."""
self._set_driver()
drv = self._driver
volume = fake_volume.fake_volume_obj(
self.context,
id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10',
size=1,
provider_location='nfs_share')
with mock.patch.object(image_utils, 'resize_image'):
with mock.patch.object(drv, 'local_path', return_value='path'):
with mock.patch.object(drv, '_is_share_eligible',
return_value=True):
with mock.patch.object(drv, '_is_file_size_equal',
return_value=False):
self.assertRaises(exception.ExtendVolumeError,
drv.extend_volume, volume, 2)
def test_extend_volume_insufficient_space(self):
"""Insufficient space on nfs_share during extend operation."""
self._set_driver()
drv = self._driver
volume = fake_volume.fake_volume_obj(
self.context,
id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10',
size=1,
provider_location='nfs_share')
with mock.patch.object(image_utils, 'resize_image'):
with mock.patch.object(drv, 'local_path', return_value='path'):
with mock.patch.object(drv, '_is_share_eligible',
return_value=False):
with mock.patch.object(drv, '_is_file_size_equal',
return_value=False):
self.assertRaises(exception.ExtendVolumeError,
drv.extend_volume, volume, 2)
def test_is_file_size_equal(self):
"""File sizes are equal."""
self._set_driver()
drv = self._driver
path = 'fake/path'
size = 2
data = mock.MagicMock()
data.virtual_size = size * units.Gi
with mock.patch.object(image_utils, 'qemu_img_info',
return_value=data):
self.assertTrue(drv._is_file_size_equal(path, size))
def test_is_file_size_equal_false(self):
"""File sizes are not equal."""
self._set_driver()
drv = self._driver
path = 'fake/path'
size = 2
data = mock.MagicMock()
data.virtual_size = (size + 1) * units.Gi
with mock.patch.object(image_utils, 'qemu_img_info',
return_value=data):
self.assertFalse(drv._is_file_size_equal(path, size))
@mock.patch.object(nfs, 'LOG')
def test_set_nas_security_options_when_true(self, LOG):
"""Test higher level setting of NAS Security options.
The NFS driver overrides the base method with a driver specific
version.
"""
self._set_driver()
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
is_new_install = True
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._determine_nas_security_option_setting = mock.Mock(
return_value='true')
drv.set_nas_security_options(is_new_install)
self.assertEqual('true', drv.configuration.nas_secure_file_operations)
self.assertEqual('true', drv.configuration.nas_secure_file_permissions)
self.assertFalse(LOG.warning.called)
@mock.patch.object(nfs, 'LOG')
def test_set_nas_security_options_when_false(self, LOG):
"""Test higher level setting of NAS Security options.
The NFS driver overrides the base method with a driver specific
version.
"""
self._set_driver()
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
is_new_install = False
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._determine_nas_security_option_setting = mock.Mock(
return_value='false')
drv.set_nas_security_options(is_new_install)
self.assertEqual('false', drv.configuration.nas_secure_file_operations)
self.assertEqual('false',
drv.configuration.nas_secure_file_permissions)
self.assertTrue(LOG.warning.called)
def test_set_nas_security_options_exception_if_no_mounted_shares(self):
"""Ensure proper exception is raised if there are no mounted shares."""
self._set_driver()
drv = self._driver
drv._ensure_shares_mounted = mock.Mock()
drv._mounted_shares = []
is_new_cinder_install = 'does not matter'
self.assertRaises(exception.NfsNoSharesMounted,
drv.set_nas_security_options,
is_new_cinder_install)
def test_ensure_share_mounted(self):
"""Case where the mount works the first time."""
self._set_driver()
self.mock_object(self._driver._remotefsclient, 'mount')
drv = self._driver
drv.configuration.nfs_mount_attempts = 3
drv.shares = {self.TEST_NFS_EXPORT1: ''}
drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
        self.assertEqual(1, drv._remotefsclient.mount.call_count)
@mock.patch('time.sleep')
def test_ensure_share_mounted_exception(self, _mock_sleep):
"""Make the configured number of attempts when mounts fail."""
num_attempts = 3
self._set_driver()
self.mock_object(self._driver._remotefsclient, 'mount',
side_effect=Exception)
drv = self._driver
drv.configuration.nfs_mount_attempts = num_attempts
drv.shares = {self.TEST_NFS_EXPORT1: ''}
self.assertRaises(exception.NfsException, drv._ensure_share_mounted,
self.TEST_NFS_EXPORT1)
self.assertEqual(num_attempts, drv._remotefsclient.mount.call_count)
def test_ensure_share_mounted_at_least_one_attempt(self):
"""Make at least one mount attempt even if configured for less."""
min_num_attempts = 1
num_attempts = 0
self._set_driver()
self.mock_object(self._driver._remotefsclient, 'mount',
side_effect=Exception)
drv = self._driver
drv.configuration.nfs_mount_attempts = num_attempts
drv.shares = {self.TEST_NFS_EXPORT1: ''}
self.assertRaises(exception.NfsException, drv._ensure_share_mounted,
self.TEST_NFS_EXPORT1)
self.assertEqual(min_num_attempts,
drv._remotefsclient.mount.call_count)
@ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT3],
[NFS_CONFIG2, QEMU_IMG_INFO_OUT4],
[NFS_CONFIG3, QEMU_IMG_INFO_OUT3],
[NFS_CONFIG4, QEMU_IMG_INFO_OUT4])
@ddt.unpack
def test_copy_volume_from_snapshot(self, nfs_conf, qemu_img_info):
self._set_driver(extra_confs=nfs_conf)
drv = self._driver
dest_volume = self._simple_volume()
src_volume = self._simple_volume()
fake_snap = fake_snapshot.fake_snapshot_obj(self.context)
fake_snap.volume = src_volume
img_out = qemu_img_info % {'volid': src_volume.id,
'snapid': fake_snap.id,
'size_gb': src_volume.size,
'size_b': src_volume.size * units.Gi}
img_info = imageutils.QemuImgInfo(img_out)
mock_img_info = self.mock_object(image_utils, 'qemu_img_info')
mock_img_info.return_value = img_info
mock_convert_image = self.mock_object(image_utils, 'convert_image')
vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
drv._get_hash_str(src_volume.provider_location))
src_vol_path = os.path.join(vol_dir, img_info.backing_file)
dest_vol_path = os.path.join(vol_dir, dest_volume.name)
info_path = os.path.join(vol_dir, src_volume.name) + '.info'
snap_file = dest_volume.name + '.' + fake_snap.id
snap_path = os.path.join(vol_dir, snap_file)
size = dest_volume.size
mock_read_info_file = self.mock_object(drv, '_read_info_file')
mock_read_info_file.return_value = {'active': snap_file,
fake_snap.id: snap_file}
mock_permission = self.mock_object(drv, '_set_rw_permissions_for_all')
drv._copy_volume_from_snapshot(fake_snap, dest_volume, size)
mock_read_info_file.assert_called_once_with(info_path)
mock_img_info.assert_called_once_with(snap_path, run_as_root=True)
used_qcow = nfs_conf['nfs_qcow2_volumes']
mock_convert_image.assert_called_once_with(
src_vol_path, dest_vol_path, 'qcow2' if used_qcow else 'raw',
run_as_root=True)
mock_permission.assert_called_once_with(dest_vol_path)
@ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT3],
[NFS_CONFIG2, QEMU_IMG_INFO_OUT4],
[NFS_CONFIG3, QEMU_IMG_INFO_OUT3],
[NFS_CONFIG4, QEMU_IMG_INFO_OUT4])
@ddt.unpack
def test_create_volume_from_snapshot(self, nfs_conf, qemu_img_info):
self._set_driver(extra_confs=nfs_conf)
drv = self._driver
# Volume source of the snapshot we are trying to clone from. We need it
# to have a different id than the default provided.
src_volume = self._simple_volume(size=10)
src_volume.id = six.text_type(uuid.uuid4())
src_volume_dir = os.path.join(self.TEST_MNT_POINT_BASE,
drv._get_hash_str(
src_volume.provider_location))
src_volume_path = os.path.join(src_volume_dir, src_volume.name)
fake_snap = fake_snapshot.fake_snapshot_obj(self.context)
# Fake snapshot based in the previous created volume
snap_file = src_volume.name + '.' + fake_snap.id
fake_snap.volume = src_volume
fake_snap.status = 'available'
fake_snap.size = 10
# New fake volume where the snap will be copied
new_volume = self._simple_volume(size=10)
new_volume_dir = os.path.join(self.TEST_MNT_POINT_BASE,
drv._get_hash_str(
src_volume.provider_location))
new_volume_path = os.path.join(new_volume_dir, new_volume.name)
# Mocks
img_out = qemu_img_info % {'volid': src_volume.id,
'snapid': fake_snap.id,
'size_gb': src_volume.size,
'size_b': src_volume.size * units.Gi}
img_info = imageutils.QemuImgInfo(img_out)
mock_img_info = self.mock_object(image_utils, 'qemu_img_info')
mock_img_info.return_value = img_info
mock_ensure = self.mock_object(drv, '_ensure_shares_mounted')
mock_find_share = self.mock_object(drv, '_find_share',
return_value=self.TEST_NFS_EXPORT1)
mock_read_info_file = self.mock_object(drv, '_read_info_file')
mock_read_info_file.return_value = {'active': snap_file,
fake_snap.id: snap_file}
mock_convert_image = self.mock_object(image_utils, 'convert_image')
self.mock_object(drv, '_create_qcow2_file')
self.mock_object(drv, '_create_regular_file')
self.mock_object(drv, '_create_regular_file')
self.mock_object(drv, '_set_rw_permissions')
self.mock_object(drv, '_read_file')
ret = drv.create_volume_from_snapshot(new_volume, fake_snap)
# Test asserts
self.assertEqual(self.TEST_NFS_EXPORT1, ret['provider_location'])
used_qcow = nfs_conf['nfs_qcow2_volumes']
mock_convert_image.assert_called_once_with(
src_volume_path, new_volume_path, 'qcow2' if used_qcow else 'raw',
run_as_root=True)
mock_ensure.assert_called_once()
mock_find_share.assert_called_once_with(new_volume)
def test_create_volume_from_snapshot_status_not_available(self):
"""Expect an error when the snapshot's status is not 'available'."""
self._set_driver()
drv = self._driver
src_volume = self._simple_volume()
fake_snap = fake_snapshot.fake_snapshot_obj(self.context)
fake_snap.volume = src_volume
new_volume = self._simple_volume()
new_volume['size'] = fake_snap['volume_size']
self.assertRaises(exception.InvalidSnapshot,
drv.create_volume_from_snapshot,
new_volume,
fake_snap)
@ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT1],
[NFS_CONFIG2, QEMU_IMG_INFO_OUT2],
[NFS_CONFIG3, QEMU_IMG_INFO_OUT1],
[NFS_CONFIG4, QEMU_IMG_INFO_OUT2])
@ddt.unpack
def test_initialize_connection(self, nfs_confs, qemu_img_info):
self._set_driver(extra_confs=nfs_confs)
drv = self._driver
volume = self._simple_volume()
vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
drv._get_hash_str(volume.provider_location))
vol_path = os.path.join(vol_dir, volume.name)
mock_img_utils = self.mock_object(image_utils, 'qemu_img_info')
img_out = qemu_img_info % {'volid': volume.id, 'size_gb': volume.size,
'size_b': volume.size * units.Gi}
mock_img_utils.return_value = imageutils.QemuImgInfo(img_out)
self.mock_object(drv, '_read_info_file',
return_value={'active': "volume-%s" % volume.id})
conn_info = drv.initialize_connection(volume, None)
mock_img_utils.assert_called_once_with(vol_path, run_as_root=True)
self.assertEqual('nfs', conn_info['driver_volume_type'])
self.assertEqual(volume.name, conn_info['data']['name'])
self.assertEqual(self.TEST_MNT_POINT_BASE,
conn_info['mount_point_base'])
@mock.patch.object(image_utils, 'qemu_img_info')
def test_initialize_connection_raise_exception(self, mock_img_info):
self._set_driver()
drv = self._driver
volume = self._simple_volume()
qemu_img_output = """image: %s
file format: iso
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
""" % volume['name']
mock_img_info.return_value = imageutils.QemuImgInfo(qemu_img_output)
self.assertRaises(exception.InvalidVolume,
drv.initialize_connection,
volume,
None)
def test_create_snapshot(self):
self._set_driver()
drv = self._driver
volume = self._simple_volume()
self.configuration.nfs_snapshot_support = True
fake_snap = fake_snapshot.fake_snapshot_obj(self.context)
fake_snap.volume = volume
vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
drv._get_hash_str(self.TEST_NFS_EXPORT1))
snap_file = volume['name'] + '.' + fake_snap.id
snap_path = os.path.join(vol_dir, snap_file)
info_path = os.path.join(vol_dir, volume['name']) + '.info'
with mock.patch.object(drv, '_local_path_volume_info',
return_value=info_path), \
mock.patch.object(drv, '_read_info_file', return_value={}), \
mock.patch.object(drv, '_do_create_snapshot') \
as mock_do_create_snapshot, \
mock.patch.object(drv, '_write_info_file') \
as mock_write_info_file, \
mock.patch.object(drv, 'get_active_image_from_info',
return_value=volume['name']), \
mock.patch.object(drv, '_get_new_snap_path',
return_value=snap_path):
self._driver.create_snapshot(fake_snap)
mock_do_create_snapshot.assert_called_with(fake_snap, volume['name'],
snap_path)
mock_write_info_file.assert_called_with(
info_path, {'active': snap_file, fake_snap.id: snap_file})
class NfsDriverDoSetupTestCase(test.TestCase):
def setUp(self):
super(NfsDriverDoSetupTestCase, self).setUp()
self.context = mock.Mock()
self.create_configuration()
def create_configuration(self):
config = conf.Configuration(None)
config.append_config_values(nfs.nfs_opts)
self.configuration = config
def test_setup_should_throw_error_if_shares_config_not_configured(self):
"""do_setup should throw error if shares config is not configured."""
self.override_config('nfs_shares_config', None)
drv = nfs.NfsDriver(configuration=self.configuration)
mock_os_path_exists = self.mock_object(os.path, 'exists')
with self.assertRaisesRegex(exception.NfsException,
".*no NFS config file configured.*"):
drv.do_setup(self.context)
self.assertEqual(0, mock_os_path_exists.call_count)
def test_setup_should_throw_error_if_shares_file_does_not_exist(self):
"""do_setup should throw error if shares file does not exist."""
drv = nfs.NfsDriver(configuration=self.configuration)
mock_os_path_exists = self.mock_object(os.path, 'exists')
mock_os_path_exists.return_value = False
with self.assertRaisesRegex(exception.NfsException,
"NFS config file.*doesn't exist"):
drv.do_setup(self.context)
mock_os_path_exists.assert_has_calls(
[mock.call(self.configuration.nfs_shares_config)])
def test_setup_should_not_throw_error_if_host_and_share_set(self):
"""do_setup shouldn't throw shares file error if host and share set."""
drv = nfs.NfsDriver(configuration=self.configuration)
self.override_config('nas_host', 'nfs-host1')
self.override_config('nas_share_path', '/export')
mock_os_path_exists = self.mock_object(os.path, 'exists')
mock_os_path_exists.return_value = False
mock_set_nas_sec_options = self.mock_object(nfs.NfsDriver,
'set_nas_security_options')
mock_set_nas_sec_options.return_value = True
mock_execute = self.mock_object(drv, '_execute')
mock_execute.return_value = True
drv.do_setup(self.context)
mock_os_path_exists.assert_not_called()
def test_setup_throw_error_if_shares_file_does_not_exist_no_host(self):
"""do_setup should throw error if no shares file and no host set."""
drv = nfs.NfsDriver(configuration=self.configuration)
self.override_config('nas_share_path', '/export')
mock_os_path_exists = self.mock_object(os.path, 'exists')
mock_os_path_exists.return_value = False
with self.assertRaisesRegex(exception.NfsException,
"NFS config file.*doesn't exist"):
drv.do_setup(self.context)
mock_os_path_exists.assert_has_calls(
[mock.call(self.configuration.nfs_shares_config)])
def test_setup_throw_error_if_shares_file_does_not_exist_no_share(self):
"""do_setup should throw error if no shares file and no share set."""
drv = nfs.NfsDriver(configuration=self.configuration)
self.override_config('nas_host', 'nfs-host1')
mock_os_path_exists = self.mock_object(os.path, 'exists')
mock_os_path_exists.return_value = False
with self.assertRaisesRegex(exception.NfsException,
"NFS config file.*doesn't exist"):
drv.do_setup(self.context)
mock_os_path_exists.assert_has_calls(
[mock.call(self.configuration.nfs_shares_config)])
def test_setup_throw_error_if_shares_file_doesnt_exist_no_share_host(self):
"""do_setup should throw error if no shares file and no host/share."""
drv = nfs.NfsDriver(configuration=self.configuration)
mock_os_path_exists = self.mock_object(os.path, 'exists')
mock_os_path_exists.return_value = False
with self.assertRaisesRegex(exception.NfsException,
"NFS config file.*doesn't exist"):
drv.do_setup(self.context)
mock_os_path_exists.assert_has_calls(
[mock.call(self.configuration.nfs_shares_config)])
def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
"""do_setup should throw error if nfs client is not installed."""
drv = nfs.NfsDriver(configuration=self.configuration)
mock_os_path_exists = self.mock_object(os.path, 'exists')
mock_os_path_exists.return_value = True
mock_execute = self.mock_object(drv, '_execute')
mock_execute.side_effect = OSError(
errno.ENOENT, 'No such file or directory.')
with self.assertRaisesRegex(exception.NfsException,
'mount.nfs is not installed'):
drv.do_setup(self.context)
mock_os_path_exists.assert_has_calls(
[mock.call(self.configuration.nfs_shares_config)])
mock_execute.assert_has_calls(
[mock.call('mount.nfs',
check_exit_code=False,
run_as_root=True)])
def test_setup_should_throw_exception_if_mount_nfs_command_fails(self):
"""do_setup should throw error if mount.nfs fails with OSError
This test covers the OSError path when mount.nfs is installed.
"""
drv = nfs.NfsDriver(configuration=self.configuration)
mock_os_path_exists = self.mock_object(os.path, 'exists')
mock_os_path_exists.return_value = True
mock_execute = self.mock_object(drv, '_execute')
mock_execute.side_effect = OSError(
errno.EPERM, 'Operation... BROKEN')
with self.assertRaisesRegex(OSError, '.*Operation... BROKEN'):
drv.do_setup(self.context)
mock_os_path_exists.assert_has_calls(
[mock.call(self.configuration.nfs_shares_config)])
mock_execute.assert_has_calls(
[mock.call('mount.nfs',
check_exit_code=False,
run_as_root=True)])
@mock.patch.object(os, 'rename')
def test_update_migrated_available_volume(self, rename_volume):
self._test_update_migrated_volume('available', rename_volume)
@mock.patch.object(os, 'rename')
def test_update_migrated_available_volume_rename_fail(self, rename_volume):
self._test_update_migrated_volume('available', rename_volume,
rename_exception=True)
@mock.patch.object(os, 'rename')
def test_update_migrated_in_use_volume(self, rename_volume):
self._test_update_migrated_volume('in-use', rename_volume)
def _test_update_migrated_volume(self, volume_status, rename_volume,
rename_exception=False):
drv = nfs.NfsDriver(configuration=self.configuration)
fake_volume_id = 'f51b5730-13b7-11e6-a238-fa163e67a298'
fake_new_volume_id = '12341234-13b7-11e6-a238-fa163e67a298'
fake_provider_source = 'fake_provider_source'
fake_provider = 'fake_provider'
base_dir = '/dir_base/'
volume_name_template = 'volume-%s'
original_volume_name = volume_name_template % fake_volume_id
current_name = volume_name_template % fake_new_volume_id
original_volume_path = base_dir + original_volume_name
current_path = base_dir + current_name
volume = fake_volume.fake_volume_obj(
self.context,
id=fake_volume_id,
size=1,
provider_location=fake_provider_source,
_name_id=None)
new_volume = fake_volume.fake_volume_obj(
self.context,
id=fake_new_volume_id,
size=1,
provider_location=fake_provider,
_name_id=None)
with mock.patch.object(drv, 'local_path') as local_path:
local_path.return_value = base_dir + current_name
if volume_status == 'in-use':
update = drv.update_migrated_volume(self.context,
volume,
new_volume,
volume_status)
self.assertEqual({'_name_id': fake_new_volume_id,
'provider_location': fake_provider}, update)
elif rename_exception:
rename_volume.side_effect = OSError
update = drv.update_migrated_volume(self.context,
volume,
new_volume,
volume_status)
rename_volume.assert_called_once_with(current_path,
original_volume_path)
self.assertEqual({'_name_id': fake_new_volume_id,
'provider_location': fake_provider}, update)
else:
update = drv.update_migrated_volume(self.context,
volume,
new_volume,
volume_status)
rename_volume.assert_called_once_with(current_path,
original_volume_path)
self.assertEqual({'_name_id': None,
'provider_location': fake_provider}, update)
def test_retype_is_there(self):
"Ensure that driver.retype() is there."""
drv = nfs.NfsDriver(configuration=self.configuration)
v1 = fake_volume.fake_volume_obj(self.context)
ret = drv.retype(self.context,
v1,
mock.sentinel.new_type,
mock.sentinel.diff,
mock.sentinel.host)
self.assertEqual((False, None), ret)
| avg_line_length: 41.520599 | max_line_length: 79 | alphanum_fraction: 0.620843 |

hexsha: 4a060e12fdc1396a0acd0d193d5623143bef1605 | size: 29684 | ext: py | lang: Python
max_stars_repo_path: setup.py | max_stars_repo_name: sagarjadhav1456/pygame | max_stars_repo_head_hexsha: 7d3900d5b04dc8102f218cc60f3fbf5e06dd8fa1 | max_stars_repo_licenses: ["Python-2.0", "OLDAP-2.3"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2021-04-03T20:01:35.000Z | max_stars_repo_stars_event_max_datetime: 2021-09-09T23:42:21.000Z
max_issues_repo_path: setup.py | max_issues_repo_name: sagarjadhav1456/pygame | max_issues_repo_head_hexsha: 7d3900d5b04dc8102f218cc60f3fbf5e06dd8fa1 | max_issues_repo_licenses: ["Python-2.0", "OLDAP-2.3"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: setup.py | max_forks_repo_name: sagarjadhav1456/pygame | max_forks_repo_head_hexsha: 7d3900d5b04dc8102f218cc60f3fbf5e06dd8fa1 | max_forks_repo_licenses: ["Python-2.0", "OLDAP-2.3"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-10-31T11:32:34.000Z | max_forks_repo_forks_event_max_datetime: 2020-10-31T11:32:34.000Z
content:
#!/usr/bin/env python
#
# This is the distutils setup script for pygame.
# Full instructions are in https://www.pygame.org/wiki/GettingStarted
#
# To configure, compile, install, just run this script.
# python setup.py install
DESCRIPTION = """Pygame is a Python wrapper module for the
SDL multimedia library. It contains python functions and classes
that will allow you to use SDL's support for playing cdroms,
audio and video output, and keyboard, mouse and joystick input."""
EXTRAS = {}
METADATA = {
"name": "pygame",
"version": "2.0.0.dev11",
"license": "LGPL",
"url": "https://www.pygame.org",
"author": "A community project.",
"author_email": "pygame@pygame.org",
"description": "Python Game Development",
"long_description": DESCRIPTION,
}
import re
import sys
import os
# just import these always and fail early if not present
import distutils
from setuptools import setup
IS_PYPY = '__pypy__' in sys.builtin_module_names
def compilation_help():
""" On failure point people to a web page for help.
"""
import platform
the_system = platform.system()
if the_system == 'Linux':
if hasattr(platform, 'linux_distribution'):
distro = platform.linux_distribution()
if distro[0].lower() == 'ubuntu':
the_system = 'Ubuntu'
elif distro[0].lower() == 'debian':
the_system = 'Debian'
help_urls = {
'Linux': 'https://www.pygame.org/wiki/Compilation',
'Ubuntu': 'https://www.pygame.org/wiki/CompileUbuntu',
'Debian': 'https://www.pygame.org/wiki/CompileDebian',
'Windows': 'https://www.pygame.org/wiki/CompileWindows',
'Darwin': 'https://www.pygame.org/wiki/MacCompile',
}
default = 'https://www.pygame.org/wiki/Compilation'
url = help_urls.get(the_system, default)
if IS_PYPY:
url += '\n https://www.pygame.org/wiki/CompilePyPy'
print ('\n---')
print ('For help with compilation see:')
print (' %s' % url)
print ('To contribute to pygame development see:')
print (' https://www.pygame.org/contribute.html')
print ('---\n')
if not hasattr(sys, 'version_info') or sys.version_info < (2,7):
compilation_help()
raise SystemExit("Pygame requires Python version 2.7 or above.")
if sys.version_info >= (3, 0) and sys.version_info < (3, 5):
    compilation_help()
    raise SystemExit("Pygame requires Python3 version 3.5 or above.")
if IS_PYPY and sys.pypy_version_info < (7,):
raise SystemExit("Pygame requires PyPy version 7.0.0 above, compatible with CPython 2.7 or CPython 3.5+")
def consume_arg(name):
if name in sys.argv:
sys.argv.remove(name)
return True
return False
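# Illustrative usage (not part of the original script): consume_arg('-stripped')
# returns True and strips the flag from sys.argv when it was passed, otherwise it
# returns False and leaves sys.argv untouched.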
#get us to the correct directory
path = os.path.split(os.path.abspath(sys.argv[0]))[0]
os.chdir(path)
#os.environ["CFLAGS"] = "-W -Wall -Wpointer-arith -Wcast-qual -Winline " + \
# "-Wcast-align -Wconversion -Wstrict-prototypes " + \
# "-Wmissing-prototypes -Wmissing-declarations " + \
# "-Wnested-externs -Wshadow -Wredundant-decls"
if consume_arg("-warnings"):
os.environ["CFLAGS"] = "-W -Wimplicit-int " + \
"-Wimplicit-function-declaration " + \
"-Wimplicit -Wmain -Wreturn-type -Wunused -Wswitch " + \
"-Wcomment -Wtrigraphs -Wformat -Wchar-subscripts " + \
"-Wuninitialized -Wparentheses " +\
"-Wpointer-arith -Wcast-qual -Winline -Wcast-align " + \
"-Wconversion -Wstrict-prototypes " + \
"-Wmissing-prototypes -Wmissing-declarations " + \
"-Wnested-externs -Wshadow -Wredundant-decls"
if consume_arg('-pygame-ci'):
cflags = os.environ.get('CFLAGS', '')
if cflags:
cflags += ' '
cflags += '-Werror=nested-externs -Werror=switch -Werror=implicit ' + \
'-Werror=implicit-function-declaration -Werror=return-type ' + \
'-Werror=implicit-int -Werror=main -Werror=pointer-arith ' + \
'-Werror=format-security -Werror=uninitialized ' + \
'-Werror=trigraphs -Werror=parentheses -Werror=unused-value ' + \
'-Werror=cast-align -Werror=int-conversion ' + \
'-Werror=incompatible-pointer-types'
os.environ['CFLAGS'] = cflags
STRIPPED=False
# STRIPPED builds don't have developer resources like docs or tests
if "PYGAME_ANDROID" in os.environ:
# test cases and docs are useless inside an APK
STRIPPED=True
if consume_arg('-stripped'):
STRIPPED=True
enable_arm_neon = False
if consume_arg('-enable-arm-neon'):
enable_arm_neon = True
cflags = os.environ.get('CFLAGS', '')
if cflags:
cflags += ' '
cflags += '-mfpu=neon'
os.environ['CFLAGS'] = cflags
if consume_arg('cython'):
# compile .pyx files
# So you can `setup.py cython` or `setup.py cython install`
try:
from Cython.Build.Dependencies import cythonize_one
except ImportError:
print("You need cython. https://cython.org/, pip install cython --user")
sys.exit(1)
from Cython.Build.Dependencies import create_extension_list
from Cython.Build.Dependencies import create_dependency_tree
try:
from Cython.Compiler.Main import Context
from Cython.Compiler.Options import CompilationOptions, default_options
c_options = CompilationOptions(default_options)
ctx = Context.from_options(c_options)
except ImportError:
from Cython.Compiler.Main import Context, CompilationOptions, default_options
c_options = CompilationOptions(default_options)
ctx = c_options.create_context()
import glob
pyx_files = glob.glob(os.path.join('src_c', 'cython', 'pygame', '*.pyx')) + \
glob.glob(os.path.join('src_c', 'cython', 'pygame', '**', '*.pyx'))
pyx_files, pyx_meta = create_extension_list(pyx_files, ctx=ctx)
deps = create_dependency_tree(ctx)
queue = []
for ext in pyx_files:
pyx_file = ext.sources[0] # TODO: check all sources, extension
c_file = os.path.splitext(pyx_file)[0].split(os.path.sep)
del c_file[1:3] # output in src_c/
c_file = os.path.sep.join(c_file) + '.c'
# update outdated .c files
if os.path.isfile(c_file):
c_timestamp = os.path.getmtime(c_file)
if c_timestamp < deps.timestamp(pyx_file):
dep_timestamp, dep = deps.timestamp(pyx_file), pyx_file
priority = 0
else:
dep_timestamp, dep = deps.newest_dependency(pyx_file)
priority = 2 - (dep in deps.immediate_dependencies(pyx_file))
if dep_timestamp > c_timestamp:
outdated = True
else:
outdated = False
else:
outdated = True
priority = 0
if outdated:
print('Compiling {} because it changed.'.format(pyx_file))
queue.append((priority, dict( pyx_file=pyx_file, c_file=c_file, fingerprint=None, quiet=False,
options=c_options, full_module_name=ext.name,
embedded_metadata=pyx_meta.get(ext.name) )))
# compile in right order
queue.sort(key=lambda a: a[0])
queue = [pair[1] for pair in queue]
count = len(queue)
for i, kwargs in enumerate(queue):
kwargs['progress'] = '[{}/{}] '.format(i + 1, count)
cythonize_one(**kwargs)
AUTO_CONFIG = False
if consume_arg('-auto'):
AUTO_CONFIG = True
import os.path, glob, stat, shutil
import distutils.sysconfig
from distutils.core import setup, Command
from distutils.extension import read_setup_file
from distutils.command.install_data import install_data
from distutils.command.sdist import sdist
revision = ''
def add_datafiles(data_files, dest_dir, pattern):
"""Add directory structures to data files according to a pattern"""
src_dir, elements = pattern
def do_directory(root_dest_path, root_src_path, elements):
files = []
for e in elements:
if isinstance(e, list):
src_dir, elems = e
dest_path = '/'.join([root_dest_path, src_dir])
src_path = os.path.join(root_src_path, src_dir)
do_directory(dest_path, src_path, elems)
else:
files.extend(glob.glob(os.path.join(root_src_path, e)))
if files:
data_files.append((root_dest_path, files))
do_directory(dest_dir, src_dir, elements)
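# A minimal sketch of the nested pattern format accepted by add_datafiles
# (hypothetical call, shown only as a comment, not executed):
#   files = []
#   add_datafiles(files, 'pygame/examples', ['examples', ['*.py', ['data', ['*']]]])
#   # -> appends ('pygame/examples', glob('examples/*.py')) and
#   #    ('pygame/examples/data', glob('examples/data/*')) to `files`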
# # allow optionally using setuptools for bdist_egg.
# if "-setuptools" in sys.argv:
#     from setuptools import setup
#     sys.argv.remove ("-setuptools")
# we need to eat this argument so that distutils doesn't trip over it
consume_arg("-setuptools")
# NOTE: the bdist_mpkg_support is for darwin.
try:
import bdist_mpkg_support
except ImportError:
pass
else:
EXTRAS.update({
'options': bdist_mpkg_support.options,
'setup_requires': ['bdist_mpkg>=0.4.2'],
#'install_requires': ['pyobjc'],
#'dependency_links': ['http://rene.f0o.com/~rene/stuff/macosx/']
})
#headers to install
headers = glob.glob(os.path.join('src_c', '*.h'))
headers.remove(os.path.join('src_c', 'scale.h'))
headers.append(os.path.join('src_c', 'include'))
import distutils.command.install_headers
# monkey patch distutils header install to copy over directories
def run_install_headers(self):
headers = self.distribution.headers
if not headers:
return
self.mkpath(self.install_dir)
for header in headers:
if os.path.isdir(header):
destdir=os.path.join(self.install_dir, os.path.basename(header))
self.mkpath(destdir)
for entry in os.listdir(header):
header1=os.path.join(header, entry)
if not os.path.isdir(header1):
(out, _) = self.copy_file(header1, destdir)
self.outfiles.append(out)
else:
(out, _) = self.copy_file(header, self.install_dir)
self.outfiles.append(out)
distutils.command.install_headers.install_headers.run = run_install_headers
# option for not installing the headers.
if consume_arg("-noheaders"):
headers = []
#sanity check for any arguments
if len(sys.argv) == 1 and sys.stdout.isatty():
if sys.version_info[0] >= 3:
reply = input('\nNo Arguments Given, Perform Default Install? [Y/n]')
else:
reply = raw_input('\nNo Arguments Given, Perform Default Install? [Y/n]')
if not reply or reply[0].lower() != 'n':
sys.argv.append('install')
#make sure there is a Setup file
if AUTO_CONFIG or not os.path.isfile('Setup'):
print ('\n\nWARNING, No "Setup" File Exists, Running "buildconfig/config.py"')
import buildconfig.config
try:
buildconfig.config.main(AUTO_CONFIG)
except:
compilation_help()
raise
if '-config' in sys.argv:
sys.exit(0)
print ('\nContinuing With "setup.py"')
try:
s_mtime = os.stat("Setup")[stat.ST_MTIME]
sin_mtime = os.stat(os.path.join('buildconfig', 'Setup.SDL1.in'))[stat.ST_MTIME]
if sin_mtime > s_mtime:
print ('\n\nWARNING, "buildconfig/Setup.SDL1.in" newer than "Setup",'
'you might need to modify "Setup".')
except OSError:
pass
# get compile info for all extensions
try:
extensions = read_setup_file('Setup')
except:
print ("""Error with the "Setup" file,
perhaps make a clean copy from "Setup.in".""")
compilation_help()
raise
# Only define the ARM_NEON defines if they have been enabled at build time.
if enable_arm_neon:
for e in extensions:
e.define_macros.append(('PG_ENABLE_ARM_NEON', '1'))
# decide whether or not to enable new buffer protocol support (PEP 3118)
# old CPython versions without newbuf are no longer supported!
# new PYPY also supports PEP 3118
enable_newbuf = True
# TODO: remove all PG_ENABLE_NEWBUF conditionals from C code
# and just fail when PEP 3118 (Py_TPFLAGS_HAVE_NEWBUFFER) is not present
# then remove this logic
for e in extensions:
e.define_macros.append(('ENABLE_NEWBUF', '1'))
# if not building font, try replacing with ftfont
alternate_font = os.path.join('src_py', 'font.py')
if os.path.exists(alternate_font):
os.remove(alternate_font)
have_font = False
have_freetype = False
for e in extensions:
if e.name == 'font':
have_font = True
if e.name == '_freetype':
have_freetype = True
if not have_font and have_freetype:
shutil.copyfile(os.path.join('src_py', 'ftfont.py'), alternate_font)
#extra files to install
data_path = os.path.join(distutils.sysconfig.get_python_lib(), 'pygame')
pygame_data_files = []
data_files = [('pygame', pygame_data_files)]
#add files in distribution directory
# pygame_data_files.append('LGPL')
# pygame_data_files.append('readme.html')
# pygame_data_files.append('install.html')
add_stubs = True
# add *.pyi files into distribution directory
if add_stubs:
type_files = glob.glob(os.path.join('buildconfig', 'pygame-stubs', '*.pyi'))
for type_file in type_files:
pygame_data_files.append(type_file)
_sdl2 = glob.glob(os.path.join('buildconfig', 'pygame-stubs', '_sdl2', '*.pyi'))
if _sdl2:
_sdl2_data_files = []
data_files.append(('pygame/_sdl2', _sdl2_data_files))
for type_file in _sdl2:
_sdl2_data_files.append(type_file)
#add non .py files in lib directory
for f in glob.glob(os.path.join('src_py', '*')):
if not f[-3:] == '.py' and not f[-4:] == '.doc' and os.path.isfile(f):
pygame_data_files.append(f)
# We don't need to deploy tests, example code, or docs inside a game
#tests/fixtures
add_datafiles(data_files, 'pygame/tests',
['test',
[['fixtures',
[['xbm_cursors',
['*.xbm']],
['fonts',
['*.ttf', '*.otf', '*.bdf', '*.png']]]]]])
#examples
add_datafiles(data_files, 'pygame/examples',
['examples',
['readme.rst',
['data',
['*']],
['macosx',
['*.py',
['aliens_app_example',
['*.py',
'README.txt',
['English.lproj',
['aliens.icns',
['MainMenu.nib',
['*']]]]]]]]]])
#docs
add_datafiles(data_files, 'pygame/docs',
['docs',
['*.html', # Navigation and help pages
'*.gif', # pygame logos
'*.js', # For doc search
['ref', # pygame reference
['*.html', # Reference pages
'*.js', # Comments script
'*.json']], # Comment data
['c_api', # pygame C API
['*.html']],
['tut', # Tutorials
['*.html',
['tom',
['*.html',
'*.png']]]],
['_static', # Sphinx added support files
['*.css',
'*.png',
'*.ico',
'*.js']],
['_images', # Sphinx added reST ".. image::" refs
['*.jpg',
'*.png',
'*.gif']],
['_sources', # Used for ref search
['*.txt',
['ref',
['*.txt']]]]]])
#generate the version module
def parse_version(ver):
return ', '.join(s for s in re.findall(r'\d+', ver)[0:3])
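# e.g. parse_version("2.0.0.dev11") -> "2, 0, 0" (only the first three numeric
# components of the version string are kept).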
def parse_source_version():
pgh_major = -1
pgh_minor = -1
pgh_patch = -1
major_exp_search = re.compile(r'define\s+PG_MAJOR_VERSION\s+([0-9]+)').search
minor_exp_search = re.compile(r'define\s+PG_MINOR_VERSION\s+([0-9]+)').search
patch_exp_search = re.compile(r'define\s+PG_PATCH_VERSION\s+([0-9]+)').search
pg_header = os.path.join('src_c', 'include', '_pygame.h')
with open(pg_header) as f:
for line in f:
if pgh_major == -1:
m = major_exp_search(line)
if m: pgh_major = int(m.group(1))
if pgh_minor == -1:
m = minor_exp_search(line)
if m: pgh_minor = int(m.group(1))
if pgh_patch == -1:
m = patch_exp_search(line)
if m: pgh_patch = int(m.group(1))
if pgh_major == -1:
raise SystemExit("_pygame.h: cannot find PG_MAJOR_VERSION")
if pgh_minor == -1:
raise SystemExit("_pygame.h: cannot find PG_MINOR_VERSION")
if pgh_patch == -1:
raise SystemExit("_pygame.h: cannot find PG_PATCH_VERSION")
return (pgh_major, pgh_minor, pgh_patch)
def write_version_module(pygame_version, revision):
vernum = parse_version(pygame_version)
src_vernum = parse_source_version()
if vernum != ', '.join(str(e) for e in src_vernum):
raise SystemExit("_pygame.h version differs from 'METADATA' version"
": %s vs %s" % (vernum, src_vernum))
with open(os.path.join('buildconfig', 'version.py.in'), 'r') as header_file:
header = header_file.read()
with open(os.path.join('src_py', 'version.py'), 'w') as version_file:
version_file.write(header)
version_file.write('ver = "' + pygame_version + '" # pylint: disable=invalid-name\n')
version_file.write('vernum = PygameVersion(%s)\n' % vernum)
version_file.write('rev = "' + revision + '" # pylint: disable=invalid-name\n')
version_file.write('\n__all__ = ["SDL", "ver", "vernum", "rev"]\n')
write_version_module(METADATA['version'], revision)
#required. This will be filled if doing a Windows build.
cmdclass = {}
def add_command(name):
def decorator(command):
assert issubclass(command, Command)
cmdclass[name]=command
return command
return decorator
#try to find DLLs and copy them too (only on windows)
if sys.platform == 'win32':
from distutils.command.build_ext import build_ext
#add dependency DLLs to the project
lib_dependencies = {}
for e in extensions:
if e.name.startswith('COPYLIB_'):
lib_dependencies[e.name[8:]] = e.libraries
def dependencies(roots):
"""Return a set of dependencies for the list of library file roots
The return set is a dictionary keyed on library root name with values of 1.
"""
root_set = {}
for root in roots:
try:
deps = lib_dependencies[root]
except KeyError:
pass
else:
root_set[root] = 1
root_set.update(dependencies(deps))
return root_set
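# Illustrative only (hypothetical library names, not executed): with
#   lib_dependencies = {'SDL2': ['zlib'], 'zlib': []}
# dependencies(['SDL2']) returns {'SDL2': 1, 'zlib': 1}, i.e. the transitive
# closure of the COPYLIB_* roots named in Setup.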
the_dlls = {}
required_dlls = {}
for e in extensions:
if e.name.startswith('COPYLIB_'):
the_dlls[e.name[8:]] = e.library_dirs[0]
else:
required_dlls.update(dependencies(e.libraries))
# join the required_dlls and the_dlls keys together.
lib_names = {}
for lib in list(required_dlls.keys()) + list(the_dlls.keys()):
lib_names[lib] = 1
for lib in lib_names.keys():
#next DLL; a distutils bug requires the paths to have Windows separators
f = the_dlls[lib].replace('/', os.sep)
if f == '_':
print ("WARNING, DLL for %s library not found." % lib)
else:
pygame_data_files.append(f)
if '-enable-msvc-analyze' in sys.argv:
# calculate the MSVC compiler version as an int
msc_pos = sys.version.find('MSC v.')
msc_ver = 1900
if msc_pos != -1:
msc_ver = int(sys.version[msc_pos + 6:msc_pos + 10])
print ('Analyzing with MSC_VER =', msc_ver)
    # excluding system headers from analyze output was only added after MSC_VER 1913
if msc_ver >= 1913:
os.environ['CAExcludePath'] = 'C:\\Program Files (x86)\\'
for e in extensions:
e.extra_compile_args += ['/analyze', '/experimental:external',
'/external:W0', '/external:env:CAExcludePath' ]
else:
for e in extensions:
e.extra_compile_args += ['/analyze']
def has_flag(compiler, flagname):
"""
Adapted from here: https://github.com/pybind/python_example/blob/master/setup.py#L37
"""
from distutils.errors import CompileError
import tempfile
root_drive = os.path.splitdrive(sys.executable)[0] + '\\'
with tempfile.NamedTemporaryFile('w', suffix='.cpp', delete=False) as f:
f.write('int main (int argc, char **argv) { return 0; }')
fname = f.name
try:
compiler.compile([fname], output_dir=root_drive, extra_postargs=[flagname])
except CompileError:
return False
else:
try:
base_file = os.path.splitext(fname)[0]
obj_file = base_file + '.obj'
os.remove(obj_file)
except OSError:
pass
finally:
try:
os.remove(fname)
except OSError:
pass
return True
# filter flags, returns list of accepted flags
def flag_filter(compiler, *flags):
return [flag for flag in flags if has_flag(compiler, flag)]
@add_command('build_ext')
class WinBuildExt(build_ext):
"""This build_ext sets necessary environment variables for MinGW"""
# __sdl_lib_dir is possible location of msvcrt replacement import
# libraries, if they exist. Pygame module base only links to SDL so
# should have the SDL library directory as its only -L option.
for e in extensions:
if e.name == 'base':
__sdl_lib_dir = e.library_dirs[0].replace('/', os.sep)
break
def build_extensions(self):
# Add supported optimisations flags to reduce code size with MSVC
opts = flag_filter(self.compiler, "/GF", "/Gy")
for extension in extensions:
extension.extra_compile_args += opts
build_ext.build_extensions(self)
# Add the precompiled smooth scale MMX functions to transform.
def replace_scale_mmx():
for e in extensions:
if e.name == 'transform':
if '64 bit' in sys.version:
e.extra_objects.append(
os.path.join('buildconfig', 'obj', 'win64', 'scale_mmx.obj'))
else:
e.extra_objects.append(
os.path.join('buildconfig', 'obj', 'win32', 'scale_mmx.obj'))
for i in range(len(e.sources)):
if e.sources[i].endswith('scale_mmx.c'):
del e.sources[i]
return
replace_scale_mmx()
#clean up the list of extensions
for e in extensions[:]:
if e.name.startswith('COPYLIB_'):
extensions.remove(e) #don't compile the COPYLIBs, just clean them
else:
e.name = 'pygame.' + e.name #prepend package name on modules
#data installer with improved intelligence over distutils
#data files are copied into the project directory instead
#of willy-nilly
@add_command('install_data')
class smart_install_data(install_data):
def run(self):
#need to change self.install_dir to the actual library dir
install_cmd = self.get_finalized_command('install')
self.install_dir = getattr(install_cmd, 'install_lib')
return install_data.run(self)
@add_command('sdist')
class OurSdist(sdist):
def initialize_options(self):
        super(OurSdist, self).initialize_options()
# we do not want MANIFEST.in to appear in the root cluttering up things.
self.template = os.path.join('buildconfig', 'MANIFEST.in')
if "bdist_msi" in sys.argv:
# if you are making an msi, we want it to overwrite files
# we also want to include the repository revision in the file name
from distutils.command import bdist_msi
import msilib
@add_command('bdist_msi')
class bdist_msi_overwrite_on_install(bdist_msi.bdist_msi):
def run(self):
bdist_msi.bdist_msi.run(self)
# Remove obsolete files.
comp = "pygame1" # Pygame component
prop = comp # Directory property
records = [("surfarray.pyd", comp,
"SURFAR~1.PYD|surfarray.pyd", prop, 1),
("sndarray.pyd", comp,
"SNDARRAY.PYD|sndarray.pyd", prop, 1),
("camera.pyd", comp, "CAMERA.PYD|camera.pyd", prop, 1),
("color.py", comp, "COLOR.PY|color.py", prop, 1),
("color.pyc", comp, "COLOR.PYC|color.pyc", prop, 1),
("color.pyo", comp, "COLOR.PYO|color.pyo", prop, 1)]
msilib.add_data(self.db, "RemoveFile", records)
# Overwrite outdated files.
fullname = self.distribution.get_fullname()
installer_name = self.get_installer_filename(fullname)
print ("changing %s to overwrite files on install" % installer_name)
msilib.add_data(self.db, "Property", [("REINSTALLMODE", "amus")])
self.db.Commit()
def get_installer_filename(self, fullname):
if revision:
fullname += '-hg_' + revision
return bdist_msi.bdist_msi.get_installer_filename(self, fullname)
# test command. For doing 'python setup.py test'
@add_command('test')
class TestCommand(Command):
user_options = [ ]
def initialize_options(self):
self._dir = os.getcwd()
def finalize_options(self):
pass
def run(self):
'''
runs the tests with default options.
'''
import subprocess
return subprocess.call([sys.executable, os.path.join('test', '__main__.py')])
@add_command('docs')
class DocsCommand(Command):
""" For building the pygame documentation with `python setup.py docs`.
This generates html, and documentation .h header files.
"""
user_options = [ ]
def initialize_options(self):
self._dir = os.getcwd()
def finalize_options(self):
pass
def run(self):
'''
runs the tests with default options.
'''
docs_help = (
"Building docs requires Python version 3.6 or above, and sphinx."
)
if not hasattr(sys, 'version_info') or sys.version_info < (3, 6):
raise SystemExit(docs_help)
import subprocess
try:
return subprocess.call([
sys.executable, os.path.join('buildconfig', 'makeref.py')]
)
except:
print(docs_help)
raise
# Prune empty file lists.
data_files = [(path, files) for path, files in data_files if files]
#finally,
#call distutils with all needed info
PACKAGEDATA = {
"cmdclass": cmdclass,
"packages": ['pygame',
'pygame.threads',
'pygame._sdl2',
'pygame.tests',
'pygame.tests.test_utils',
'pygame.tests.run_tests__tests',
'pygame.tests.run_tests__tests.all_ok',
'pygame.tests.run_tests__tests.failures1',
'pygame.tests.run_tests__tests.incomplete',
'pygame.tests.run_tests__tests.infinite_loop',
'pygame.tests.run_tests__tests.print_stderr',
'pygame.tests.run_tests__tests.print_stdout',
'pygame.tests.run_tests__tests.incomplete_todo',
'pygame.tests.run_tests__tests.exclude',
'pygame.tests.run_tests__tests.timeout',
'pygame.tests.run_tests__tests.everything',
'pygame.docs',
'pygame.examples'],
"package_dir": {'pygame': 'src_py',
'pygame._sdl2': 'src_py/_sdl2',
'pygame.threads': 'src_py/threads',
'pygame.tests': 'test',
'pygame.docs': 'docs',
'pygame.examples': 'examples'},
"headers": headers,
"ext_modules": extensions,
"data_files": data_files,
"zip_safe": False,
}
if STRIPPED:
PACKAGEDATA = {
"cmdclass": cmdclass,
"packages": ['pygame',
'pygame.threads',
'pygame._sdl2'],
"package_dir": {'pygame': 'src_py',
'pygame._sdl2': 'src_py/_sdl2',
'pygame.threads': 'src_py/threads'},
"ext_modules": extensions,
"zip_safe": False,
}
PACKAGEDATA.update(METADATA)
PACKAGEDATA.update(EXTRAS)
try:
setup(**PACKAGEDATA)
except:
compilation_help()
raise
| 35.850242
| 109
| 0.583681
|
4a060f0c75f66abe18e471a6b7e2ccdaaba062c3
| 37,665
|
py
|
Python
|
venv1/Lib/site-packages/tensorflow/contrib/eager/python/checkpointable_utils.py
|
Soum-Soum/Tensorflow_Face_Finder
|
fec6c15d2df7012608511ad87f4b55731bf99478
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
venv1/Lib/site-packages/tensorflow/contrib/eager/python/checkpointable_utils.py
|
Soum-Soum/Tensorflow_Face_Finder
|
fec6c15d2df7012608511ad87f4b55731bf99478
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-05-20T00:58:04.000Z
|
2021-05-20T00:58:04.000Z
|
venv1/Lib/site-packages/tensorflow/contrib/eager/python/checkpointable_utils.py
|
Soum-Soum/Tensorflow_Face_Finder
|
fec6c15d2df7012608511ad87f4b55731bf99478
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""Utilities for working with Checkpointable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import weakref
from tensorflow.contrib.eager.proto import checkpointable_object_graph_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpointable as core_checkpointable
from tensorflow.python.training import checkpointable_utils as core_checkpointable_utils
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import deprecation
_ESCAPE_CHAR = "." # For avoiding conflicts with user-specified names.
# Keyword for identifying that the next bit of a checkpoint variable name is a
# slot name. Checkpoint names for slot variables look like:
#
# <path to variable>/<_OPTIMIZER_SLOTS_NAME>/<path to optimizer>/<slot name>
#
# Where <path to variable> is a full path from the checkpoint root to the
# variable being slotted for.
_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + "OPTIMIZER_SLOT"
# Keyword for separating the path to an object from the name of an
# attribute in checkpoint names. Used like:
# <path to variable>/<_OBJECT_ATTRIBUTES_NAME>/<name of attribute>
_OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + "ATTRIBUTES"
# Key where the object graph proto is saved in a TensorBundle
_OBJECT_GRAPH_PROTO_KEY = "_CHECKPOINTABLE_OBJECT_GRAPH"
# TODO(allenl): If this ends up in a public API, consider adding LINT.IfChange
# or consolidating the implementation with get_variable.
def _default_getter(name, shape, dtype, initializer=None,
partition_info=None, **kwargs):
"""A pared-down version of get_variable which does not reuse variables."""
dtype = dtypes.as_dtype(dtype)
shape_object = tensor_shape.as_shape(shape)
with ops.init_scope():
if initializer is None:
initializer, initializing_from_value = (
variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access
name=name, shape=shape_object, dtype=dtype))
else:
initializing_from_value = not callable(initializer)
# Same logic as get_variable
variable_dtype = dtype.base_dtype
if initializing_from_value:
if shape is not None:
raise ValueError("If initializer is a constant, do not specify shape.")
initial_value = initializer
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
def initial_value():
return initializer(
shape_object.as_list(), dtype=dtype, partition_info=partition_info)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
name=name,
dtype=variable_dtype,
**kwargs
)
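# Illustrative usage sketch (hypothetical names, shown as a comment only):
#   v = _default_getter("bias", shape=[10], dtype=dtypes.float32,
#                       initializer=init_ops.zeros_initializer())
# creates a ResourceVariable directly, without the reuse machinery of get_variable.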
def add_variable(checkpointable, name, shape=None, dtype=dtypes.float32,
initializer=None):
"""Add a variable to a Checkpointable with no scope influence."""
return checkpointable._add_variable_with_custom_getter( # pylint: disable=protected-access
name=name, shape=shape, dtype=dtype,
initializer=initializer, getter=_default_getter)
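# Illustrative only (`obj` is a hypothetical Checkpointable instance):
#   bias = add_variable(obj, "bias", shape=[10],
#                       initializer=init_ops.zeros_initializer())
# the new variable is tracked as a dependency of `obj` under the local name "bias".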
def _breadth_first_checkpointable_traversal(root_checkpointable):
"""Find shortest paths to all variables owned by dependencies of root."""
bfs_sorted = []
to_visit = collections.deque([root_checkpointable])
path_to_root = {root_checkpointable: ()}
while to_visit:
current_checkpointable = to_visit.popleft()
current_checkpointable._maybe_initialize_checkpointable() # pylint: disable=protected-access
bfs_sorted.append(current_checkpointable)
for child_checkpointable in (
current_checkpointable._checkpoint_dependencies): # pylint: disable=protected-access
if child_checkpointable.ref not in path_to_root:
path_to_root[child_checkpointable.ref] = (
path_to_root[current_checkpointable] + (child_checkpointable,))
to_visit.append(child_checkpointable.ref)
return bfs_sorted, path_to_root
def _escape_local_name(name):
# We need to support slashes in local names for compatibility, since this
# naming scheme is being patched in to things like Layer.add_variable where
# slashes were previously accepted. We also want to use slashes to indicate
# edges traversed to reach the variable, so we escape forward slashes in
# names.
return (name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR + _ESCAPE_CHAR)
.replace(r"/", _ESCAPE_CHAR + "S"))
def _object_prefix_from_path(path_to_root):
return "/".join(
(_escape_local_name(checkpointable.name)
for checkpointable in path_to_root))
def _slot_variable_naming_for_optimizer(optimizer_path):
"""Make a function for naming slot variables in an optimizer."""
# Name slot variables:
#
# <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>
#
# where <variable name> is exactly the checkpoint name used for the original
# variable, including the path from the checkpoint root and the local name in
# the object which owns it. Note that we only save slot variables if the
# variable it's slotting for is also being saved.
optimizer_identifier = "/%s/%s/" % (_OPTIMIZER_SLOTS_NAME, optimizer_path)
def _name_slot_variable(variable_path, slot_name):
"""With an optimizer specified, name a slot variable."""
return (variable_path
+ optimizer_identifier
+ _escape_local_name(slot_name))
return _name_slot_variable
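# Illustrative only (hypothetical paths):
#   _slot_variable_naming_for_optimizer("optimizer")("model/dense/kernel", "momentum")
#   -> "model/dense/kernel/.OPTIMIZER_SLOT/optimizer/momentum"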
def _serialize_slot_variables(checkpointable_objects, node_ids, object_names):
"""Gather and name slot variables."""
non_slot_objects = list(checkpointable_objects)
slot_variables = {}
for checkpointable in non_slot_objects:
if isinstance(checkpointable, optimizer_lib.Optimizer):
naming_scheme = _slot_variable_naming_for_optimizer(
optimizer_path=object_names[checkpointable])
slot_names = checkpointable.get_slot_names()
for slot_name in slot_names:
for original_variable_node_id, original_variable in enumerate(
non_slot_objects):
try:
slot_variable = checkpointable.get_slot(
original_variable, slot_name)
except AttributeError:
slot_variable = None
if slot_variable is None:
continue
slot_variable._maybe_initialize_checkpointable() # pylint: disable=protected-access
if slot_variable._checkpoint_dependencies: # pylint: disable=protected-access
# TODO(allenl): Gather dependencies of slot variables.
raise NotImplementedError(
"Currently only variables with no dependencies can be saved as "
"slot variables. File a feature request if this limitation "
"bothers you.")
if slot_variable in node_ids:
raise NotImplementedError(
"A slot variable was re-used as a dependency of a "
"Checkpointable object. This is not currently allowed. File a "
"feature request if this limitation bothers you.")
checkpoint_name = naming_scheme(
variable_path=object_names[original_variable],
slot_name=slot_name)
object_names[slot_variable] = checkpoint_name
slot_variable_node_id = len(checkpointable_objects)
node_ids[slot_variable] = slot_variable_node_id
checkpointable_objects.append(slot_variable)
slot_variable_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph
.Object.SlotVariableReference(
slot_name=slot_name,
original_variable_node_id=original_variable_node_id,
slot_variable_node_id=slot_variable_node_id))
slot_variables.setdefault(checkpointable, []).append(
slot_variable_proto)
return slot_variables
def _serialize_checkpointables(
checkpointable_objects, node_ids, object_names, slot_variables):
"""Name non-slot `Checkpointable`s and add them to `object_graph_proto`."""
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
named_saveables = {}
for checkpoint_id, checkpointable in enumerate(checkpointable_objects):
assert node_ids[checkpointable] == checkpoint_id
object_proto = object_graph_proto.nodes.add()
object_proto.slot_variables.extend(slot_variables.get(checkpointable, ()))
object_name = object_names[checkpointable]
for name, saveable in (
checkpointable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access
attribute = object_proto.attributes.add()
attribute.name = name
attribute.checkpoint_key = "%s/%s/%s" % (
object_name, _OBJECT_ATTRIBUTES_NAME, _escape_local_name(name))
# Figure out the name-based Saver's name for this variable.
saver_dict = saver_lib.BaseSaverBuilder.OpListToDict(
[saveable], convert_variable_to_tensor=False)
attribute.full_name, = saver_dict.keys()
named_saveables[attribute.checkpoint_key] = saveable
for child in checkpointable._checkpoint_dependencies: # pylint: disable=protected-access
child_proto = object_proto.children.add()
child_proto.node_id = node_ids[child.ref]
child_proto.local_name = child.name
return named_saveables, object_graph_proto
def _serialize_object_graph(root_checkpointable):
"""Determine checkpoint keys for variables and build a serialized graph.
Non-slot variables are keyed based on a shortest path from the root saveable
to the object which owns the variable (i.e. the one which called
`Checkpointable._add_variable` to create it).
Slot variables are keyed based on a shortest path to the variable being
slotted for, a shortest path to their optimizer, and the slot name.
Args:
root_checkpointable: A `Checkpointable` object whose variables (including
the variables of dependencies, recursively) should be saved.
Returns:
A tuple of (named_variables, object_graph_proto):
named_variables: A dictionary mapping names to variable objects.
object_graph_proto: A CheckpointableObjectGraph protocol buffer containing
the serialized object graph and variable references.
Raises:
ValueError: If there are invalid characters in an optimizer's slot names.
"""
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
slot_variables = _serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return _serialize_checkpointables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names,
slot_variables=slot_variables)
def gather_initializers(root_checkpointable):
"""Traverse the object graph and find initialization ops.
Looks for `Checkpointable` objects which are dependencies of
`root_checkpointable` and which have an `initializer` property. Includes
initializers for slot variables only if the variable they are slotting for and
the optimizer are dependencies of `root_checkpointable` (i.e. if they would be
saved with a checkpoint).
Args:
root_checkpointable: A `Checkpointable` object to gather initializers for.
Returns:
A list of initialization ops.
"""
# TODO(allenl): Extract out gathering logic so the naming logic doesn't have
# to run.
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
_serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return [c.initializer for c in checkpointable_objects
if hasattr(c, "initializer") and c.initializer is not None]
class _NoRestoreSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
def __init__(self, tensor, name):
spec = saver_lib.BaseSaverBuilder.SaveSpec(tensor, "", name)
super(_NoRestoreSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
return control_flow_ops.no_op()
class _LoadStatus(object):
"""Abstract base for load status callbacks."""
@abc.abstractmethod
def assert_consumed(self):
"""Raises an exception unless a non-trivial restoration has completed."""
pass
@abc.abstractmethod
def run_restore_ops(self, session=None):
"""Runs restore ops from the checkpoint. Requires a valid checkpoint."""
pass
@abc.abstractmethod
def initialize_or_restore(self, session=None):
"""Runs restore ops from the checkpoint, or initializes variables."""
pass
class CheckpointLoadStatus(_LoadStatus):
"""Checks the status of checkpoint loading and manages restore ops.
Returned from `Saver.restore`. Since `restore` may defer the loading of values
in the checkpoint which don't yet have corresponding Python objects,
`CheckpointLoadStatus` provides a callback to verify that checkpoint loading
is complete (`assert_consumed`).
When graph building, `restore` does not run restore ops itself since their
creation may be deferred. The `run_restore_ops` method must be called once all
Python objects with values to restore have been created and added to the
dependency graph (this does not necessarily have to be the whole checkpoint;
calling `run_restore_ops` while `assert_consumed` fails is supported and will
partially restore the checkpoint).
See `Saver.restore` for usage examples.
"""
def __init__(self, checkpoint, feed_dict):
self._checkpoint = checkpoint
self._feed_dict = feed_dict
def assert_consumed(self):
"""Asserts that all objects in the checkpoint have been created/matched.
Returns:
`self` for chaining.
Raises:
AssertionError: If there are any Python objects in the dependency graph
which have not been restored from this checkpoint or a later `restore`,
or if there are any checkpointed values which have not been matched to
Python objects.
"""
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
checkpointable = self._checkpoint.object_by_proto_id.get(node_id, None)
if checkpointable is None:
raise AssertionError("Unresolved object in checkpoint: %s" % (node,))
if checkpointable._update_uid < self._checkpoint.restore_uid: # pylint: disable=protected-access
raise AssertionError(
"Object not assigned a value from checkpoint: %s" % (node,))
if self._checkpoint.slot_restorations:
# Sanity check; this collection should be clear if everything has been
# restored.
raise AssertionError("Unresolved slot restorations: %s" % (
self._checkpoint.slot_restorations,))
if self._checkpoint.unused_attributes:
raise AssertionError(
("Unused attributes in these objects (the attributes exist in the "
"checkpoint but not in the objects): %s") % (
self._checkpoint.unused_attributes.items(),))
return self
def run_restore_ops(self, session=None):
"""Run operations to restore objects in the dependency graph."""
if context.executing_eagerly():
return # Run eagerly
if session is None:
session = ops.get_default_session()
session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)
def initialize_or_restore(self, session=None):
"""Alias for `run_restore_ops`.
This method has a sibling in `InitializationOnlyStatus` which instead
initializes variables. That type is returned if no checkpoint is specified
in `Saver.restore`.
Args:
session: The session to run restore ops in. If `None`, uses the default
session.
"""
self.run_restore_ops(session=session)
class InitializationOnlyStatus(_LoadStatus):
"""Returned from `Saver.restore` when no checkpoint has been specified.
Objects of this type have the same `assert_consumed` method as
`CheckpointLoadStatus`, but it always fails. However,
`initialize_or_restore` works on objects of both types, and will
initialize variables in `InitializationOnlyStatus` objects or restore them
otherwise.
"""
def __init__(self, root_checkpointable):
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def run_restore_ops(self, session=None):
"""For consistency with `CheckpointLoadStatus`.
Use `initialize_or_restore` for initializing if no checkpoint was passed
to `Saver.restore` and restoring otherwise.
Args:
session: Not used.
"""
raise AssertionError(
"No checkpoint specified, so no restore ops are available "
"(save_path=None to Saver.restore).")
def initialize_or_restore(self, session=None):
"""Runs initialization ops for variables.
Only objects which would be saved by `Saver.save` will be initialized. See
`gather_initializers` for details.
This method does nothing when executing eagerly (initializers get run
eagerly).
Args:
session: The session to run initialization ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # run eagerly
if session is None:
session = ops.get_default_session()
session.run(gather_initializers(self._root_checkpointable))
_DEPRECATED_RESTORE_INSTRUCTIONS = (
"Restoring a name-based tf.train.Saver checkpoint using the object-based "
"restore API. This mode uses global names to match variables, and so is "
"somewhat fragile. It also adds new restore ops to the graph each time it "
"is called. Prefer re-encoding training checkpoints in the object-based "
"format: run save() on the object-based saver (the same one this message "
"is coming from) and use that checkpoint in the future.")
class NameBasedSaverStatus(_LoadStatus):
"""Status for loading a name-based training checkpoint."""
def __init__(self, object_saver, save_path):
self._object_saver = object_saver
self._save_path = save_path
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"Restoring a name-based checkpoint. No load status is available.")
@deprecation.deprecated(
date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)
def run_restore_ops(self, session=None):
"""Load the name-based training checkpoint using a new `tf.train.Saver`."""
if session is None and not context.executing_eagerly():
session = ops.get_default_session()
with ops.device("/cpu:0"):
saver_lib.Saver(self._object_saver._global_variable_names()).restore( # pylint: disable=protected-access
sess=session, save_path=self._save_path)
def initialize_or_restore(self, session=None):
"""Alias for `run_restore_ops`."""
self.run_restore_ops(session=session)
class _SessionWithFeedDictAdditions(session_lib.SessionInterface):
"""Pretends to be a session, inserts extra feeds on run()."""
def __init__(self, session, feed_additions):
self._wrapped_session = session
self._feed_additions = feed_additions
def run(self, fetches, feed_dict=None, **kwargs):
if feed_dict is None:
feed_dict = {}
else:
feed_dict = feed_dict.copy()
feed_dict.update(self._feed_additions)
return self._wrapped_session.run(
fetches=fetches, feed_dict=feed_dict, **kwargs)
class CheckpointableSaver(object):
"""Saves and restores a `Checkpointable` object and its dependencies.
See `Checkpointable` for details of dependency management. `Saver` wraps
`tf.train.Saver` for saving, including extra information about the graph of
dependencies between Python objects. When restoring, it uses this information
about the save-time dependency graph to more robustly match objects with their
checkpointed values. When executing eagerly, it supports restoring variables
on object creation (see `Saver.restore`).
Values in a checkpoint are mapped to `Checkpointable` Python objects
(`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the
checkpoint was written. To avoid breaking existing checkpoints when modifying
a class, dependency names (the names of attributes to which `Checkpointable`
objects are assigned) may not change. These names are local to objects, in
contrast to the `Variable.name`-based save/restore from `tf.train.Saver`, and
so allow additional program transformations.
"""
def __init__(self, root_checkpointable):
"""Configure saving.
Args:
root_checkpointable: The root of the object graph to save/restore. This
object and all of its dependencies are saved in the checkpoint. When
restoring, objects are matched and restored starting from this root.
"""
# Allow passing in a weak reference to avoid reference cycles when
# `Checkpointable` objects save themselves.
self._root_checkpointable_ref = root_checkpointable
if not context.executing_eagerly():
with ops.device("/cpu:0"):
self._file_prefix_placeholder = constant_op.constant("model")
else:
self._file_prefix_placeholder = None
# Op caching for save
self._object_graph_feed_tensor = None
self._last_save_object_graph = None
self._last_save_saver = None
# Op caching for restore
self._object_graph_restore_tensor = None
self._last_restore_object_graph = None
self._last_restore_checkpoint = None
@property
def _root_checkpointable(self):
if isinstance(self._root_checkpointable_ref, weakref.ref):
derefed = self._root_checkpointable_ref()
assert derefed is not None
return derefed
else:
return self._root_checkpointable_ref
def save(self, file_prefix, checkpoint_number=None, session=None):
"""Save a training checkpoint.
The saved checkpoint includes variables created by this object and any
Checkpointable objects it depends on at the time `Saver.save()` is called.
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `checkpoint_number`, if provided.
checkpoint_number: An integer variable or Tensor, used to number
checkpoints. Typically this value is saved along with other variables in
training checkpoints, which will happen automatically if it was created
by `root_checkpointable` or one of its dependencies (via
`Checkpointable._add_variable`).
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint.
"""
named_variables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
if self._object_graph_feed_tensor is None:
with ops.device("/cpu:0"):
self._object_graph_feed_tensor = constant_op.constant(
"", dtype=dtypes.string)
object_graph_tensor = self._object_graph_feed_tensor
feed_additions = {object_graph_tensor: graph_proto.SerializeToString()}
else:
session = None
with ops.device("/cpu:0"):
object_graph_tensor = constant_op.constant(
graph_proto.SerializeToString(), dtype=dtypes.string)
feed_additions = None
assert _OBJECT_GRAPH_PROTO_KEY not in named_variables
named_variables[_OBJECT_GRAPH_PROTO_KEY] = _NoRestoreSaveable(
tensor=object_graph_tensor,
name=_OBJECT_GRAPH_PROTO_KEY)
if not in_graph_mode or self._last_save_object_graph != graph_proto:
if self._last_save_object_graph is not None and in_graph_mode:
raise NotImplementedError(
"Using a single Saver to save a mutated object graph is not "
"currently supported when graph building. Use a different Saver "
"when the object graph changes (save ops will be duplicated), or "
"file a feature request if this limitation bothers you.")
saver = saver_lib.Saver(var_list=named_variables)
if in_graph_mode:
self._last_save_saver = saver
self._last_save_object_graph = graph_proto
else:
saver = self._last_save_saver
with ops.device("/cpu:0"):
save_path = saver.save(
sess=_SessionWithFeedDictAdditions(
session=session, feed_additions=feed_additions),
save_path=file_prefix,
write_meta_graph=False,
global_step=checkpoint_number)
return save_path
def _global_variable_names(self):
"""Generate a `tf.train.Saver`-style `var_list` using `variable.name`s."""
named_saveables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
saver_names = {}
for object_proto in graph_proto.nodes:
for attribute_proto in object_proto.attributes:
saver_names[attribute_proto.full_name] = named_saveables[
attribute_proto.checkpoint_key]
return saver_names
def restore(self, save_path, session=None):
"""Restore a training checkpoint.
Restores `root_checkpointable` and any objects that it tracks
(transitive). Either assigns values immediately if variables to restore have
been created already, or defers restoration until the variables are
created. Dependencies added to the `root_checkpointable` passed to the
constructor after this call will be matched if they have a corresponding
object in the checkpoint.
When building a graph, restorations are added to the graph but not run. A
session is required to retrieve checkpoint metadata.
To disallow deferred loading, assert immediately that all checkpointed
variables have been matched to variable objects:
```python
saver = Saver(root)
saver.restore(path).assert_consumed()
```
An exception will be raised unless every object was matched and its
variables already exist.
When graph building, `assert_consumed()` indicates that all of the restore
ops which will be created for this checkpoint have been created. They can be
run via the `run_restore_ops()` function of the status object:
```python
saver.restore(path).assert_consumed().run_restore_ops()
```
If the checkpoint has not been consumed completely, then the list of restore
ops will grow as more objects are added to the dependency graph.
Name-based `tf.train.Saver` checkpoints can be loaded using this
method. There is no deferred loading, and names are used to match
variables. No restore ops are created/run until `run_restore_ops()` or
`initialize_or_restore()` are called on the returned status object, even
when executing eagerly. Re-encode name-based checkpoints using this
object-based `Saver.save` as soon as possible.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency
graph. If the checkpoint was written by the name-based `tf.train.Saver`,
names are used to match variables.
session: The session to retrieve metadata with. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
A load status object, which can be used to make assertions about the
status of checkpoint restoration and run initialization/restore ops
(of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if
`save_path` is `None`).
If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`
object is returned which runs restore ops from a name-based saver.
"""
if save_path is None:
return InitializationOnlyStatus(self._root_checkpointable)
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
file_prefix_tensor = self._file_prefix_placeholder
file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}
else:
session = None
with ops.device("/cpu:0"):
file_prefix_tensor = constant_op.constant(save_path)
file_prefix_feed_dict = None
try:
if not in_graph_mode or self._object_graph_restore_tensor is None:
with ops.device("/cpu:0"):
object_graph_string, = io_ops.restore_v2(
prefix=file_prefix_tensor,
tensor_names=[_OBJECT_GRAPH_PROTO_KEY],
shape_and_slices=[""],
dtypes=[dtypes.string],
name="object_graph_proto_read")
if in_graph_mode:
self._object_graph_restore_tensor = object_graph_string
if in_graph_mode:
object_graph_string = session.run(
self._object_graph_restore_tensor,
feed_dict=file_prefix_feed_dict)
else:
object_graph_string = object_graph_string.numpy()
except errors_impl.NotFoundError:
# The object graph proto does not exist in this checkpoint. Try again with
# name-based saving.
return NameBasedSaverStatus(self, save_path)
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
if in_graph_mode and object_graph_proto == self._last_restore_object_graph:
checkpoint = self._last_restore_checkpoint
else:
if in_graph_mode:
dtype_map = None
else:
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
dtype_map = reader.get_variable_to_dtype_map()
checkpoint = core_checkpointable_utils._Checkpoint( # pylint: disable=protected-access
object_graph_proto=object_graph_proto,
save_path=file_prefix_tensor,
dtype_map=dtype_map)
if in_graph_mode:
if self._last_restore_object_graph is not None:
raise NotImplementedError(
"Using a single Saver to restore different object graphs is not "
"currently supported when graph building. Use a different Saver "
"for each object graph (restore ops will be duplicated), or "
"file a feature request if this limitation bothers you.")
self._last_restore_checkpoint = checkpoint
self._last_restore_object_graph = object_graph_proto
core_checkpointable._CheckpointPosition( # pylint: disable=protected-access
checkpoint=checkpoint, proto_id=0).restore(self._root_checkpointable)
load_status = CheckpointLoadStatus(
checkpoint, feed_dict=file_prefix_feed_dict)
return load_status
class Checkpoint(core_checkpointable.Checkpointable):
"""A utility class which groups `Checkpointable` objects.
Accepts arbitrary keyword arguments to its constructor and saves those values
with a checkpoint. Maintains a `save_counter` for numbering checkpoints.
Example usage:
```python
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import os
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
root = tfe.Checkpoint(optimizer=optimizer, model=model)
root.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
optimizer.minimize( ... )
root.save(file_prefix=checkpoint_prefix)
```
For more manual control over saving, use `tfe.CheckpointableSaver` directly.
Attributes:
save_counter: Incremented when `save()` is called. Used to number
checkpoints.
"""
def __init__(self, **kwargs):
"""Group objects into a training checkpoint.
Args:
**kwargs: Keyword arguments are set as attributes of this object, and are
saved with the checkpoint. Attribute values must derive from
`CheckpointableBase`.
Raises:
ValueError: If objects in `kwargs` are not Checkpointable.
"""
super(Checkpoint, self).__init__()
for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
if not isinstance(v, core_checkpointable.CheckpointableBase):
raise ValueError(
("`Checkpoint` was expecting an object derived from "
"`CheckpointableBase`, got %s.") % (v,))
setattr(self, k, v)
self._save_counter = None # Created lazily for restore-on-create.
self._saver = CheckpointableSaver(weakref.ref(self))
def _maybe_create_save_counter(self):
"""Create a save counter if it does not yet exist."""
if self._save_counter is None:
# Initialized to 0 and incremented before saving.
with ops.device("/cpu:0"):
self._save_counter = add_variable(
self, name="save_counter", initializer=0, dtype=dtypes.int64)
@property
def save_counter(self):
"""An integer variable which starts at zero and is incremented on save.
Used to number checkpoints.
Returns:
The save counter variable.
"""
self._maybe_create_save_counter()
return self._save_counter
def save(self, file_prefix, session=None):
"""Save a checkpoint. Wraps `tfe.CheckpointableSaver.save`."""
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
if self._save_counter is None:
# When graph building, if this is a new save counter variable then it
# needs to be initialized before assign_add. This is only an issue if
# restore() has not been called first.
session.run(self.save_counter.initializer)
with ops.colocate_with(self.save_counter):
assign_op = self.save_counter.assign_add(1)
if in_graph_mode:
session.run(assign_op)
return self._saver.save(
file_prefix=file_prefix,
checkpoint_number=self.save_counter,
session=session)
def restore(self, save_path):
"""Restore a checkpoint. Wraps `tfe.CheckpointableSaver.restore`."""
status = self._saver.restore(save_path=save_path)
# Create the save counter now so it gets initialized with other variables
# when graph building. Creating it earlier would lead to double
# initialization when executing eagerly.
self._maybe_create_save_counter()
return status
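# --- Illustrative sketch (not part of the original module) -----------------
# A minimal eager-mode round trip using the `Checkpoint` class defined above;
# `my_model` and `my_optimizer` are assumed, hypothetical objects.
#
#     ckpt = Checkpoint(model=my_model, optimizer=my_optimizer)
#     save_path = ckpt.save("/tmp/ckpt/demo")   # e.g. "/tmp/ckpt/demo-1"
#     status = ckpt.restore(save_path)          # returns a load-status object
#     status.assert_consumed()                  # verify all values were matched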
| 42.801136 | 117 | 0.711881 |
4a0611ba960682f9f602f3ce36035e3958c86894 | 538 | py | Python
| doc/scripts/runtime_hook_subclass.py | jvail/xarray-simlab | 3e8cb81775868e3e7c6495489ba351567e0d7e42 | ["Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | 48 | 2017-06-19T16:31:37.000Z | 2021-04-26T04:42:48.000Z
| doc/scripts/runtime_hook_subclass.py | rlange2/xarray-simlab | 45359e99cbf6341464b02cb937618c051a58a31c | ["BSD-3-Clause"] | 108 | 2017-06-26T12:22:10.000Z | 2021-03-09T08:57:02.000Z
| doc/scripts/runtime_hook_subclass.py | rlange2/xarray-simlab | 45359e99cbf6341464b02cb937618c051a58a31c | ["BSD-3-Clause"] | 10 | 2017-08-11T04:56:20.000Z | 2021-03-01T16:46:55.000Z
|
# TODO: use sphinx ipython directive when issue fixed
# https://github.com/ipython/ipython/issues/11362
import xsimlab as xs
import time
class PrintStepTime(xs.RuntimeHook):
@xs.runtime_hook("run_step", "model", "pre")
def start_step(self, model, context, state):
self._start_time = time.time()
@xs.runtime_hook("run_step", "model", "post")
def finish_step(self, model, context, state):
step_time = time.time() - self._start_time
print(f"Step {context['step']} took {step_time:.2e} seconds")
| 28.315789 | 69 | 0.684015 |
4a0611cbda4cb32530996863dd77a0a08ac658d6 | 7,530 | py | Python
| cohesity_management_sdk/models/vmware_restore_parameters.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | ["Apache-2.0"] | 1 | 2021-01-07T20:36:22.000Z | 2021-01-07T20:36:22.000Z
| cohesity_management_sdk/models/vmware_restore_parameters.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | ["Apache-2.0"] | null | null | null |
| cohesity_management_sdk/models/vmware_restore_parameters.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.network_mapping
class VmwareRestoreParameters(object):
"""Implementation of the 'VmwareRestoreParameters' model.
Specifies the information required for recovering or cloning VmWare VMs.
Attributes:
datastore_folder_id (long|int): Specifies the folder where the restore
datastore should be created. This is applicable only when the VMs
are being cloned.
datastore_id (long|int): Specifies the datastore where the object's
files should be recovered to. This field is mandatory to recover
objects to a different resource pool or to a different parent
source. If not specified, objects are recovered to their original
datastore locations in the parent source.
detach_network (bool): Specifies whether the network should be
detached from the recovered or cloned VMs.
disable_network (bool): Specifies whether the network should be left
in disabled state. Attached network is enabled by default. Set
this flag to true to disable it.
network_id (long|int): Specifies a network configuration to be
attached to the cloned or recovered object. For kCloneVMs and
kRecoverVMs tasks, original network configuration is detached if
the cloned or recovered object is kept under a different parent
Protection Source or a different Resource Pool. By default, for
kRecoverVMs task, original network configuration is preserved if
the recovered object is kept under the same parent Protection
Source and the same Resource Pool. Specify this field to override
the preserved network configuration or to attach a new network
configuration to the cloned or recovered objects. You can get the
networkId of the kNetwork object by setting includeNetworks to
'true' in the GET /public/protectionSources operation. In the
response, get the id of the desired kNetwork object, the resource
pool, and the registered parent Protection Source.
network_mappings (list of NetworkMapping): Specifies the parameters
for mapping the source and target networks. This field can be used
if restoring to a different parent source. This will replace the
NetworkId and DisableNetwork that are used to provide
            configuration for a single network. Until mapping support is
            available for all entities, the old keys can be used to attach a
            new network. Supports 'kVMware' for now.
powered_on (bool): Specifies the power state of the cloned or
recovered objects. By default, the cloned or recovered objects are
powered off.
        prefix (string): Specifies a prefix to prepend to the source object
name to derive a new name for the recovered or cloned object. By
default, cloned or recovered objects retain their original name.
Length of this field is limited to 8 characters.
resource_pool_id (long|int): Specifies the resource pool where the
cloned or recovered objects are attached. This field is mandatory
for kCloneVMs Restore Tasks always. For kRecoverVMs Restore Tasks,
this field is mandatory only if newParentId field is specified. If
this field is not specified, recovered objects are attached to the
original resource pool under the original parent.
        suffix (string): Specifies a suffix to append to the original source
object name to derive a new name for the recovered or cloned
object. By default, cloned or recovered objects retain their
original name. Length of this field is limited to 8 characters.
vm_folder_id (long|int): Specifies a folder where the VMs should be
restored. This is applicable only when the VMs are being restored
to an alternate location or if clone is being performed.
"""
# Create a mapping from Model property names to API property names
_names = {
"datastore_folder_id":'datastoreFolderId',
"datastore_id":'datastoreId',
"detach_network":'detachNetwork',
"disable_network":'disableNetwork',
"network_id":'networkId',
"network_mappings":'networkMappings',
"powered_on":'poweredOn',
"prefix":'prefix',
"resource_pool_id":'resourcePoolId',
"suffix":'suffix',
"vm_folder_id":'vmFolderId'
}
def __init__(self,
datastore_folder_id=None,
datastore_id=None,
detach_network=None,
disable_network=None,
network_id=None,
network_mappings=None,
powered_on=None,
prefix=None,
resource_pool_id=None,
suffix=None,
vm_folder_id=None):
"""Constructor for the VmwareRestoreParameters class"""
# Initialize members of the class
self.datastore_folder_id = datastore_folder_id
self.datastore_id = datastore_id
self.detach_network = detach_network
self.disable_network = disable_network
self.network_id = network_id
self.network_mappings = network_mappings
self.powered_on = powered_on
self.prefix = prefix
self.resource_pool_id = resource_pool_id
self.suffix = suffix
self.vm_folder_id = vm_folder_id
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
datastore_folder_id = dictionary.get('datastoreFolderId')
datastore_id = dictionary.get('datastoreId')
detach_network = dictionary.get('detachNetwork')
disable_network = dictionary.get('disableNetwork')
network_id = dictionary.get('networkId')
network_mappings = None
        if dictionary.get('networkMappings') is not None:
network_mappings = list()
for structure in dictionary.get('networkMappings'):
network_mappings.append(cohesity_management_sdk.models.network_mapping.NetworkMapping.from_dictionary(structure))
powered_on = dictionary.get('poweredOn')
prefix = dictionary.get('prefix')
resource_pool_id = dictionary.get('resourcePoolId')
suffix = dictionary.get('suffix')
vm_folder_id = dictionary.get('vmFolderId')
# Return an object of this model
return cls(datastore_folder_id,
datastore_id,
detach_network,
disable_network,
network_id,
network_mappings,
powered_on,
prefix,
resource_pool_id,
suffix,
vm_folder_id)
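# --- Illustrative sketch (not part of the generated model) -----------------
# Building an instance from an API-style dictionary via the classmethod above;
# the field values below are made up for demonstration purposes.
#
#     params = VmwareRestoreParameters.from_dictionary(
#         {'datastoreId': 42, 'poweredOn': True, 'prefix': 'copy-'})
#     assert params.datastore_id == 42 and params.powered_on is True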
| 46.770186 | 129 | 0.65166 |
4a0611e36fa01a0e4ed61083842a0fdecae06e93 | 476 | py | Python
| lists.py | jplusplus/goodiebag | 8f8f26719220fe69efd42edf4a1309e522146d41 | ["0BSD"] | null | null | null |
| lists.py | jplusplus/goodiebag | 8f8f26719220fe69efd42edf4a1309e522146d41 | ["0BSD"] | null | null | null |
| lists.py | jplusplus/goodiebag | 8f8f26719220fe69efd42edf4a1309e522146d41 | ["0BSD"] | 2 | 2015-10-19T08:16:02.000Z | 2020-10-19T08:22:48.000Z
|
# -*- coding: utf-8 -*-
def get_unique(list_):
"""Returnerar en lista där varje värde bara förekommer
en gång.
"""
return list(set(list_))
def flatten_list(list_):
""" Returnerar en endimensionell lista [a, b, c, d, e],
givet en tvådimensionell [[a, b], [c], [d, e]]
"""
return [inner
for outer in list_
for inner in outer]
print(flatten_list([[1, 2], [3], [4, 5]]))
print(get_unique([1, 2, 3, 1, 6, 1, 4, 5]))
| 20.695652 | 59 | 0.556723 |
4a06123eacb453444ef363610c58daf3ff7e762e | 776 | py | Python
| send2trash/win/__init__.py | hsoft/send2trash | be402728fb7f5f889961d38ca10648ac97379845 | ["BSD-3-Clause"] | 147 | 2015-01-06T07:08:43.000Z | 2020-05-20T12:52:01.000Z
| send2trash/win/__init__.py | hsoft/send2trash | be402728fb7f5f889961d38ca10648ac97379845 | ["BSD-3-Clause"] | 43 | 2015-06-04T15:39:16.000Z | 2020-06-03T17:05:31.000Z
| send2trash/win/__init__.py | hsoft/send2trash | be402728fb7f5f889961d38ca10648ac97379845 | ["BSD-3-Clause"] | 32 | 2015-03-24T08:27:15.000Z | 2020-05-21T16:20:16.000Z
|
# Copyright 2017 Virgil Dupras
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from __future__ import unicode_literals
from platform import version
# If Windows is Vista or newer and pywin32 is available, use IFileOperation
if int(version().split(".", 1)[0]) >= 6:
try:
# Attempt to use pywin32 to use IFileOperation
from send2trash.win.modern import send2trash
except ImportError:
# use SHFileOperation as fallback
from send2trash.win.legacy import send2trash
else:
# use SHFileOperation as fallback
from send2trash.win.legacy import send2trash # noqa: F401
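# --- Illustrative sketch (not part of the original package) ----------------
# Whichever backend was imported above, callers use the same function;
# the path below is a made-up example.
#
#     from send2trash import send2trash
#     send2trash(r"C:\Users\example\old_report.txt")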
| 36.952381 | 87 | 0.743557 |
4a06127c59d0df4084773b6f7559d54908474033 | 1,884 | py | Python
| Projects/calc.py | SanjanaSogimatt/Python | a84b94aadd599eb189bf637eebd7f0db703d798d | ["MIT"] | 12 | 2021-01-18T16:22:27.000Z | 2021-11-30T04:38:27.000Z
| Projects/calc.py | SanjanaSogimatt/Python | a84b94aadd599eb189bf637eebd7f0db703d798d | ["MIT"] | 31 | 2021-03-02T16:33:16.000Z | 2022-03-30T04:01:15.000Z
| Projects/calc.py | SanjanaSogimatt/Python | a84b94aadd599eb189bf637eebd7f0db703d798d | ["MIT"] | 31 | 2021-03-02T14:26:17.000Z | 2022-01-30T16:51:08.000Z
|
# calculator function - which when called will function as a calculator
def calculator():
print("Options:\n\t[1] Add \n\t[2] Subtract \n\t[3] Multiply \n\t[4] Divide \n\t[5] Power \n\t[6] Square root")
ch = int(input("\t--> ")) # take option input from user
#addition
if ch == 1:
num1 = float(input("1st number --> "))
num2 = float(input("2nd number --> "))
print(f"{num1} + {num2} = {num1 + num2}")
#subtraction
elif ch == 2:
num1 = float(input("1st number --> "))
num2 = float(input("2nd number --> "))
print(f"{num1} - {num2} = {num1 - num2}")
#multiplication
elif ch == 3:
num1 = float(input("1st number --> "))
num2 = float(input("2nd number --> "))
print(f"{num1} x {num2} = {num1 * num2}")
#division
elif ch == 4:
num1 = float(input("Dividend --> "))
num2 = float(input("Divisor --> "))
# try-except which checks if divisor is zero (which isn't allowed)
try:
print(f"{num1} ÷ {num2} = {num1 / num2}")
except ZeroDivisionError:
print(f"{num1} ÷ {num2} = Error: Division by 0!")
#power
elif ch == 5:
num = float(input("Number --> "))
power = float(input("Power --> "))
print(f"{num} ^ {power} = {num ** power}")
#root
elif ch == 6:
num = float(input("Number --> "))
print(f"√{num} = {num**(1/2)}")
else:
print("Invalid input!!")
#====================
# MAIN PROGRAM
print("<-- Basic Calculator -->")
print("Does what it says on the tin!")
print("-" * 30) #decoration
run = 'Y'
while run in ('Y', 'YES'):
calculator()
print("-" * 30)
print("Would you like calculate more?\n\t[Y] Yes\n\t[N] No")
run = input("\t--> ").upper()
while run not in ['Y','YES','N','NO']:
run = input("\t--> ").upper()
print("-" * 30)
| 31.4 | 115 | 0.508493 |
4a0612bb5b044dabde8552cba901dc29f29b24bf | 1,404 | py | Python
| quotespage/urls.py | Cornell-CIS-Slack/cs-quotes | a4451ff0703acebb762641cbc236cc0e51e2d2fd | ["BSD-3-Clause"] | 1 | 2017-10-04T16:16:22.000Z | 2017-10-04T16:16:22.000Z
| quotespage/urls.py | Cornell-CIS-Slack/cs-quotes | a4451ff0703acebb762641cbc236cc0e51e2d2fd | ["BSD-3-Clause"] | null | null | null |
| quotespage/urls.py | Cornell-CIS-Slack/cs-quotes | a4451ff0703acebb762641cbc236cc0e51e2d2fd | ["BSD-3-Clause"] | null | null | null |
from django.urls import path, re_path
from django.views.generic import TemplateView
from quotespage import views
app_name='quotespage'
urlpatterns = [
path('', views.index, name='index'),
path('submit/', views.submit, name='submit'),
path('speakers/', views.speaker_list, name='speaker-list'),
re_path(r'^speaker/(?P<speaker>[-,%.\w]+)/$', views.speaker_archive, name='speaker'),
re_path(r'^speaker/(?P<speaker>[-,%.\w]+)/(?P<pagenum>\d+)/$', views.speaker_archive, name='speaker-pages'),
path('random/', views.random_quote, name='random'),
re_path(r'^random/(?P<year>\d{4})/$', views.random_quote, name='random-byyear'),
path('byvotes/', views.top_voted, name='byvotes'),
re_path(r'^byvotes/(?P<pagenum>\d+)/$', views.top_voted, name='byvotes-pages'),
path('about/', TemplateView.as_view(template_name="quotespage/about.html"), name='about'),
path('search/', views.search, name='search'),
path('submit/success/', TemplateView.as_view(template_name="quotespage/success.html"), name='success'),
re_path(r'^page/(?P<pagenum>\d+)/$', views.index, name='pages'),
re_path(r'^quote/(?P<quoteid>\d+)/$', views.permalink, name='permalink'),
path('api/vote/', views.vote, name='vote'),
path('api/random/', views.json_random_quote, name='api-random'),
path('api/genkey/', views.generate_api_key, name='genkey'),
path('api/submit/', views.remote_submit, name='remote-submit'),
]
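# --- Illustrative sketch (not part of the original urls.py) ----------------
# With app_name set above, the named routes are reversed through the
# "quotespage:" namespace; the speaker value is a made-up example.
#
#     from django.urls import reverse
#     reverse('quotespage:speaker', kwargs={'speaker': 'Ada-Lovelace'})
#     # -> '/speaker/Ada-Lovelace/' (prefix depends on how this urlconf is included)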
| 50.142857 | 109 | 0.687322 |
4a0612f4dcf19768617230233b7da3c0e42249f2 | 92,428 | py | Python
| nltk/downloader.py | PhanatosZou/nltk | 750e488569b6f80c72ae6ca74eff90eae55e6c4e | ["Apache-2.0"] | null | null | null |
| nltk/downloader.py | PhanatosZou/nltk | 750e488569b6f80c72ae6ca74eff90eae55e6c4e | ["Apache-2.0"] | null | null | null |
| nltk/downloader.py | PhanatosZou/nltk | 750e488569b6f80c72ae6ca74eff90eae55e6c4e | ["Apache-2.0"] | null | null | null |
# Natural Language Toolkit: Corpus & Model Downloader
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
The NLTK corpus and module downloader. This module defines several
interfaces which can be used to download corpora, models, and other
data packages that can be used with NLTK.
Downloading Packages
====================
If called with no arguments, ``download()`` will display an interactive
interface which can be used to download and install new packages.
If Tkinter is available, then a graphical interface will be shown,
otherwise a simple text interface will be provided.
Individual packages can be downloaded by calling the ``download()``
function with a single argument, giving the package identifier for the
package that should be downloaded:
>>> download('treebank') # doctest: +SKIP
[nltk_data] Downloading package 'treebank'...
[nltk_data] Unzipping corpora/treebank.zip.
NLTK also provides a number of \"package collections\", consisting of
a group of related packages. To download all packages in a
collection, simply call ``download()`` with the collection's
identifier:
>>> download('all-corpora') # doctest: +SKIP
[nltk_data] Downloading package 'abc'...
[nltk_data] Unzipping corpora/abc.zip.
[nltk_data] Downloading package 'alpino'...
[nltk_data] Unzipping corpora/alpino.zip.
...
[nltk_data] Downloading package 'words'...
[nltk_data] Unzipping corpora/words.zip.
Download Directory
==================
By default, packages are installed in either a system-wide directory
(if Python has sufficient access to write to it); or in the current
user's home directory. However, the ``download_dir`` argument may be
used to specify a different installation target, if desired.
See ``Downloader.default_download_dir()`` for a more detailed
description of how the default download directory is chosen.
NLTK Download Server
====================
Before downloading any packages, the corpus and module downloader
contacts the NLTK download server, to retrieve an index file
describing the available packages. By default, this index file is
loaded from ``https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml``.
If necessary, it is possible to create a new ``Downloader`` object,
specifying a different URL for the package index file.
Usage::
python nltk/downloader.py [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
or::
python -m nltk.downloader [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
"""
# ----------------------------------------------------------------------
"""
0 1 2 3
[label][----][label][----]
[column ][column ]
Notes
=====
Handling data files.. Some questions:
* Should the data files be kept zipped or unzipped? I say zipped.
* Should the data files be kept in svn at all? Advantages: history;
automatic version numbers; 'svn up' could be used rather than the
downloader to update the corpora. Disadvantages: they're big,
which makes working from svn a bit of a pain. And we're planning
to potentially make them much bigger. I don't think we want
people to have to download 400MB corpora just to use nltk from svn.
* Compromise: keep the data files in trunk/data rather than in
trunk/nltk. That way you can check them out in svn if you want
to; but you don't need to, and you can use the downloader instead.
* Also: keep models in mind. When we change the code, we'd
potentially like the models to get updated. This could require a
little thought.
* So.. let's assume we have a trunk/data directory, containing a bunch
of packages. The packages should be kept as zip files, because we
really shouldn't be editing them much (well -- we may edit models
more, but they tend to be binary-ish files anyway, where diffs
aren't that helpful). So we'll have trunk/data, with a bunch of
files like abc.zip and treebank.zip and propbank.zip. For each
package we could also have eg treebank.xml and propbank.xml,
describing the contents of the package (name, copyright, license,
etc). Collections would also have .xml files. Finally, we would
pull all these together to form a single index.xml file. Some
directory structure wouldn't hurt. So how about::
/trunk/data/ ....................... root of data svn
index.xml ........................ main index file
src/ ............................. python scripts
packages/ ........................ dir for packages
corpora/ ....................... zip & xml files for corpora
grammars/ ...................... zip & xml files for grammars
taggers/ ....................... zip & xml files for taggers
tokenizers/ .................... zip & xml files for tokenizers
etc.
collections/ ..................... xml files for collections
Where the root (/trunk/data) would contain a makefile; and src/
would contain a script to update the info.xml file. It could also
contain scripts to rebuild some of the various model files. The
script that builds index.xml should probably check that each zip
file expands entirely into a single subdir, whose name matches the
package's uid.
Changes I need to make:
- in index: change "size" to "filesize" or "compressed-size"
- in index: add "unzipped-size"
- when checking status: check both compressed & uncompressed size.
uncompressed size is important to make sure we detect a problem
if something got partially unzipped. define new status values
to differentiate stale vs corrupt vs corruptly-uncompressed??
(we shouldn't need to re-download the file if the zip file is ok
but it didn't get uncompressed fully.)
- add other fields to the index: author, license, copyright, contact,
etc.
the current grammars/ package would become a single new package (eg
toy-grammars or book-grammars).
xml file should have:
- authorship info
- license info
- copyright info
- contact info
- info about what type of data/annotation it contains?
- recommended corpus reader?
collections can contain other collections. they can also contain
multiple package types (corpora & models). Have a single 'basics'
package that includes everything we talk about in the book?
n.b.: there will have to be a fallback to the punkt tokenizer, in case
they didn't download that model.
default: unzip or not?
"""
import time, os, zipfile, sys, textwrap, threading, itertools, shutil, functools
import subprocess
from hashlib import md5
from xml.etree import ElementTree
try:
TKINTER = True
from tkinter import (
Tk,
Frame,
Label,
Entry,
Button,
Canvas,
Menu,
IntVar,
TclError,
)
from tkinter.messagebox import showerror
from nltk.draw.table import Table
from nltk.draw.util import ShowText
except ImportError:
TKINTER = False
TclError = ValueError
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
import nltk
# urllib2 = nltk.internals.import_from_stdlib('urllib2')
######################################################################
# Directory entry objects (from the data server's index file)
######################################################################
class Package(object):
"""
A directory entry for a downloadable package. These entries are
extracted from the XML index file that is downloaded by
``Downloader``. Each package consists of a single file; but if
that file is a zip file, then it can be automatically decompressed
when the package is installed.
"""
def __init__(
self,
id,
url,
name=None,
subdir="",
size=None,
unzipped_size=None,
checksum=None,
svn_revision=None,
copyright="Unknown",
contact="Unknown",
license="Unknown",
author="Unknown",
unzip=True,
**kw
):
self.id = id
"""A unique identifier for this package."""
self.name = name or id
"""A string name for this package."""
self.subdir = subdir
"""The subdirectory where this package should be installed.
E.g., ``'corpora'`` or ``'taggers'``."""
self.url = url
"""A URL that can be used to download this package's file."""
self.size = int(size)
"""The filesize (in bytes) of the package file."""
self.unzipped_size = int(unzipped_size)
"""The total filesize of the files contained in the package's
zipfile."""
self.checksum = checksum
"""The MD-5 checksum of the package file."""
self.svn_revision = svn_revision
"""A subversion revision number for this package."""
self.copyright = copyright
"""Copyright holder for this package."""
self.contact = contact
"""Name & email of the person who should be contacted with
questions about this package."""
self.license = license
"""License information for this package."""
self.author = author
"""Author of this package."""
ext = os.path.splitext(url.split("/")[-1])[1]
self.filename = os.path.join(subdir, id + ext)
"""The filename that should be used for this package's file. It
is formed by joining ``self.subdir`` with ``self.id``, and
using the same extension as ``url``."""
self.unzip = bool(int(unzip)) # '0' or '1'
"""A flag indicating whether this corpus should be unzipped by
default."""
# Include any other attributes provided by the XML file.
self.__dict__.update(kw)
@staticmethod
def fromxml(xml):
if isinstance(xml, str):
xml = ElementTree.parse(xml)
for key in xml.attrib:
xml.attrib[key] = str(xml.attrib[key])
return Package(**xml.attrib)
def __lt__(self, other):
return self.id < other.id
def __repr__(self):
return "<Package %s>" % self.id
class Collection(object):
"""
A directory entry for a collection of downloadable packages.
These entries are extracted from the XML index file that is
downloaded by ``Downloader``.
"""
def __init__(self, id, children, name=None, **kw):
self.id = id
"""A unique identifier for this collection."""
self.name = name or id
"""A string name for this collection."""
self.children = children
"""A list of the ``Collections`` or ``Packages`` directly
contained by this collection."""
self.packages = None
"""A list of ``Packages`` contained by this collection or any
collections it recursively contains."""
# Include any other attributes provided by the XML file.
self.__dict__.update(kw)
@staticmethod
def fromxml(xml):
if isinstance(xml, str):
xml = ElementTree.parse(xml)
for key in xml.attrib:
xml.attrib[key] = str(xml.attrib[key])
children = [child.get("ref") for child in xml.findall("item")]
return Collection(children=children, **xml.attrib)
def __lt__(self, other):
return self.id < other.id
def __repr__(self):
return "<Collection %s>" % self.id
######################################################################
# Message Passing Objects
######################################################################
class DownloaderMessage(object):
"""A status message object, used by ``incr_download`` to
communicate its progress."""
class StartCollectionMessage(DownloaderMessage):
"""Data server has started working on a collection of packages."""
def __init__(self, collection):
self.collection = collection
class FinishCollectionMessage(DownloaderMessage):
"""Data server has finished working on a collection of packages."""
def __init__(self, collection):
self.collection = collection
class StartPackageMessage(DownloaderMessage):
"""Data server has started working on a package."""
def __init__(self, package):
self.package = package
class FinishPackageMessage(DownloaderMessage):
"""Data server has finished working on a package."""
def __init__(self, package):
self.package = package
class StartDownloadMessage(DownloaderMessage):
"""Data server has started downloading a package."""
def __init__(self, package):
self.package = package
class FinishDownloadMessage(DownloaderMessage):
"""Data server has finished downloading a package."""
def __init__(self, package):
self.package = package
class StartUnzipMessage(DownloaderMessage):
"""Data server has started unzipping a package."""
def __init__(self, package):
self.package = package
class FinishUnzipMessage(DownloaderMessage):
"""Data server has finished unzipping a package."""
def __init__(self, package):
self.package = package
class UpToDateMessage(DownloaderMessage):
"""The package download file is already up-to-date"""
def __init__(self, package):
self.package = package
class StaleMessage(DownloaderMessage):
"""The package download file is out-of-date or corrupt"""
def __init__(self, package):
self.package = package
class ErrorMessage(DownloaderMessage):
"""Data server encountered an error"""
def __init__(self, package, message):
self.package = package
if isinstance(message, Exception):
self.message = str(message)
else:
self.message = message
class ProgressMessage(DownloaderMessage):
"""Indicates how much progress the data server has made"""
def __init__(self, progress):
self.progress = progress
class SelectDownloadDirMessage(DownloaderMessage):
"""Indicates what download directory the data server is using"""
def __init__(self, download_dir):
self.download_dir = download_dir
######################################################################
# NLTK Data Server
######################################################################
class Downloader(object):
"""
A class used to access the NLTK data server, which can be used to
download corpora and other data packages.
"""
# /////////////////////////////////////////////////////////////////
# Configuration
# /////////////////////////////////////////////////////////////////
INDEX_TIMEOUT = 60 * 60 # 1 hour
"""The amount of time after which the cached copy of the data
server index will be considered 'stale,' and will be
re-downloaded."""
DEFAULT_URL = "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml"
"""The default URL for the NLTK data server's index. An
alternative URL can be specified when creating a new
``Downloader`` object."""
# /////////////////////////////////////////////////////////////////
# Status Constants
# /////////////////////////////////////////////////////////////////
INSTALLED = "installed"
"""A status string indicating that a package or collection is
installed and up-to-date."""
NOT_INSTALLED = "not installed"
"""A status string indicating that a package or collection is
not installed."""
STALE = "out of date"
"""A status string indicating that a package or collection is
corrupt or out-of-date."""
PARTIAL = "partial"
"""A status string indicating that a collection is partially
installed (i.e., only some of its packages are installed.)"""
# /////////////////////////////////////////////////////////////////
    # Constructor
# /////////////////////////////////////////////////////////////////
def __init__(self, server_index_url=None, download_dir=None):
self._url = server_index_url or self.DEFAULT_URL
"""The URL for the data server's index file."""
self._collections = {}
"""Dictionary from collection identifier to ``Collection``"""
self._packages = {}
"""Dictionary from package identifier to ``Package``"""
self._download_dir = download_dir
"""The default directory to which packages will be downloaded."""
self._index = None
"""The XML index file downloaded from the data server"""
self._index_timestamp = None
"""Time at which ``self._index`` was downloaded. If it is more
than ``INDEX_TIMEOUT`` seconds old, it will be re-downloaded."""
self._status_cache = {}
"""Dictionary from package/collection identifier to status
string (``INSTALLED``, ``NOT_INSTALLED``, ``STALE``, or
``PARTIAL``). Cache is used for packages only, not
collections."""
self._errors = None
"""Flag for telling if all packages got successfully downloaded or not."""
# decide where we're going to save things to.
if self._download_dir is None:
self._download_dir = self.default_download_dir()
# /////////////////////////////////////////////////////////////////
# Information
# /////////////////////////////////////////////////////////////////
def list(
self,
download_dir=None,
show_packages=True,
show_collections=True,
header=True,
more_prompt=False,
skip_installed=False,
):
lines = 0 # for more_prompt
if download_dir is None:
download_dir = self._download_dir
print("Using default data directory (%s)" % download_dir)
if header:
print("=" * (26 + len(self._url)))
print(" Data server index for <%s>" % self._url)
print("=" * (26 + len(self._url)))
lines += 3 # for more_prompt
stale = partial = False
categories = []
if show_packages:
categories.append("packages")
if show_collections:
categories.append("collections")
for category in categories:
print("%s:" % category.capitalize())
lines += 1 # for more_prompt
for info in sorted(getattr(self, category)(), key=str):
status = self.status(info, download_dir)
if status == self.INSTALLED and skip_installed:
continue
if status == self.STALE:
stale = True
if status == self.PARTIAL:
partial = True
prefix = {
self.INSTALLED: "*",
self.STALE: "-",
self.PARTIAL: "P",
self.NOT_INSTALLED: " ",
}[status]
name = textwrap.fill(
"-" * 27 + (info.name or info.id), 75, subsequent_indent=27 * " "
)[27:]
print(" [%s] %s %s" % (prefix, info.id.ljust(20, "."), name))
lines += len(name.split("\n")) # for more_prompt
if more_prompt and lines > 20:
user_input = input("Hit Enter to continue: ")
if user_input.lower() in ("x", "q"):
return
lines = 0
print()
msg = "([*] marks installed packages"
if stale:
msg += "; [-] marks out-of-date or corrupt packages"
if partial:
msg += "; [P] marks partially installed collections"
print(textwrap.fill(msg + ")", subsequent_indent=" ", width=76))
def packages(self):
self._update_index()
return self._packages.values()
def corpora(self):
self._update_index()
return [pkg for (id, pkg) in self._packages.items() if pkg.subdir == "corpora"]
def models(self):
self._update_index()
return [pkg for (id, pkg) in self._packages.items() if pkg.subdir != "corpora"]
def collections(self):
self._update_index()
return self._collections.values()
# /////////////////////////////////////////////////////////////////
# Downloading
# /////////////////////////////////////////////////////////////////
def _info_or_id(self, info_or_id):
if isinstance(info_or_id, str):
return self.info(info_or_id)
else:
return info_or_id
# [xx] When during downloading is it 'safe' to abort? Only unsafe
# time is *during* an unzip -- we don't want to leave a
# partially-unzipped corpus in place because we wouldn't notice
# it. But if we had the exact total size of the unzipped corpus,
# then that would be fine. Then we could abort anytime we want!
# So this is really what we should do. That way the threaded
# downloader in the gui can just kill the download thread anytime
# it wants.
def incr_download(self, info_or_id, download_dir=None, force=False):
# If they didn't specify a download_dir, then use the default one.
if download_dir is None:
download_dir = self._download_dir
yield SelectDownloadDirMessage(download_dir)
# If they gave us a list of ids, then download each one.
if isinstance(info_or_id, (list, tuple)):
for msg in self._download_list(info_or_id, download_dir, force):
yield msg
return
# Look up the requested collection or package.
try:
info = self._info_or_id(info_or_id)
except (IOError, ValueError) as e:
yield ErrorMessage(None, "Error loading %s: %s" % (info_or_id, e))
return
# Handle collections.
if isinstance(info, Collection):
yield StartCollectionMessage(info)
for msg in self.incr_download(info.children, download_dir, force):
yield msg
yield FinishCollectionMessage(info)
# Handle Packages (delegate to a helper function).
else:
for msg in self._download_package(info, download_dir, force):
yield msg
def _num_packages(self, item):
if isinstance(item, Package):
return 1
else:
return len(item.packages)
def _download_list(self, items, download_dir, force):
# Look up the requested items.
for i in range(len(items)):
try:
items[i] = self._info_or_id(items[i])
except (IOError, ValueError) as e:
yield ErrorMessage(items[i], e)
return
# Download each item, re-scaling their progress.
num_packages = sum(self._num_packages(item) for item in items)
progress = 0
for i, item in enumerate(items):
if isinstance(item, Package):
delta = 1.0 / num_packages
else:
delta = len(item.packages) / num_packages
for msg in self.incr_download(item, download_dir, force):
if isinstance(msg, ProgressMessage):
yield ProgressMessage(progress + msg.progress * delta)
else:
yield msg
progress += 100 * delta
def _download_package(self, info, download_dir, force):
yield StartPackageMessage(info)
yield ProgressMessage(0)
# Do we already have the current version?
status = self.status(info, download_dir)
if not force and status == self.INSTALLED:
yield UpToDateMessage(info)
yield ProgressMessage(100)
yield FinishPackageMessage(info)
return
# Remove the package from our status cache
self._status_cache.pop(info.id, None)
# Check for (and remove) any old/stale version.
filepath = os.path.join(download_dir, info.filename)
if os.path.exists(filepath):
if status == self.STALE:
yield StaleMessage(info)
os.remove(filepath)
# Ensure the download_dir exists
if not os.path.exists(download_dir):
os.mkdir(download_dir)
if not os.path.exists(os.path.join(download_dir, info.subdir)):
os.mkdir(os.path.join(download_dir, info.subdir))
# Download the file. This will raise an IOError if the url
# is not found.
yield StartDownloadMessage(info)
yield ProgressMessage(5)
try:
infile = urlopen(info.url)
with open(filepath, "wb") as outfile:
num_blocks = max(1, info.size / (1024 * 16))
for block in itertools.count():
s = infile.read(1024 * 16) # 16k blocks.
outfile.write(s)
if not s:
break
if block % 2 == 0: # how often?
yield ProgressMessage(min(80, 5 + 75 * (block / num_blocks)))
infile.close()
except IOError as e:
yield ErrorMessage(
info,
"Error downloading %r from <%s>:" "\n %s" % (info.id, info.url, e),
)
return
yield FinishDownloadMessage(info)
yield ProgressMessage(80)
# If it's a zipfile, uncompress it.
if info.filename.endswith(".zip"):
zipdir = os.path.join(download_dir, info.subdir)
# Unzip if we're unzipping by default; *or* if it's already
# been unzipped (presumably a previous version).
if info.unzip or os.path.exists(os.path.join(zipdir, info.id)):
yield StartUnzipMessage(info)
for msg in _unzip_iter(filepath, zipdir, verbose=False):
# Somewhat of a hack, but we need a proper package reference
msg.package = info
yield msg
yield FinishUnzipMessage(info)
yield FinishPackageMessage(info)
def download(
self,
info_or_id=None,
download_dir=None,
quiet=False,
force=False,
prefix="[nltk_data] ",
halt_on_error=True,
raise_on_error=False,
print_error_to=sys.stderr,
):
print_to = functools.partial(print, file=print_error_to)
# If no info or id is given, then use the interactive shell.
if info_or_id is None:
# [xx] hmm -- changing self._download_dir here seems like
# the wrong thing to do. Maybe the _interactive_download
# function should make a new copy of self to use?
if download_dir is not None:
self._download_dir = download_dir
self._interactive_download()
return True
else:
# Define a helper function for displaying output:
def show(s, prefix2=""):
print_to(
textwrap.fill(
s,
initial_indent=prefix + prefix2,
subsequent_indent=prefix + prefix2 + " " * 4,
)
)
for msg in self.incr_download(info_or_id, download_dir, force):
# Error messages
if isinstance(msg, ErrorMessage):
show(msg.message)
if raise_on_error:
raise ValueError(msg.message)
if halt_on_error:
return False
self._errors = True
if not quiet:
print_to("Error installing package. Retry? [n/y/e]")
choice = input().strip()
if choice in ["y", "Y"]:
if not self.download(
msg.package.id,
download_dir,
quiet,
force,
prefix,
halt_on_error,
raise_on_error,
):
return False
elif choice in ["e", "E"]:
return False
# All other messages
if not quiet:
# Collection downloading messages:
if isinstance(msg, StartCollectionMessage):
show("Downloading collection %r" % msg.collection.id)
prefix += " | "
print_to(prefix)
elif isinstance(msg, FinishCollectionMessage):
print_to(prefix)
prefix = prefix[:-4]
if self._errors:
show(
"Downloaded collection %r with errors"
% msg.collection.id
)
else:
show("Done downloading collection %s" % msg.collection.id)
# Package downloading messages:
elif isinstance(msg, StartPackageMessage):
show(
"Downloading package %s to %s..."
% (msg.package.id, download_dir)
)
elif isinstance(msg, UpToDateMessage):
show("Package %s is already up-to-date!" % msg.package.id, " ")
# elif isinstance(msg, StaleMessage):
# show('Package %s is out-of-date or corrupt' %
# msg.package.id, ' ')
elif isinstance(msg, StartUnzipMessage):
show("Unzipping %s." % msg.package.filename, " ")
# Data directory message:
elif isinstance(msg, SelectDownloadDirMessage):
download_dir = msg.download_dir
return True
def is_stale(self, info_or_id, download_dir=None):
return self.status(info_or_id, download_dir) == self.STALE
def is_installed(self, info_or_id, download_dir=None):
return self.status(info_or_id, download_dir) == self.INSTALLED
def clear_status_cache(self, id=None):
if id is None:
self._status_cache.clear()
else:
self._status_cache.pop(id, None)
def status(self, info_or_id, download_dir=None):
"""
Return a constant describing the status of the given package
or collection. Status can be one of ``INSTALLED``,
``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``.
"""
if download_dir is None:
download_dir = self._download_dir
info = self._info_or_id(info_or_id)
# Handle collections:
if isinstance(info, Collection):
pkg_status = [self.status(pkg.id) for pkg in info.packages]
if self.STALE in pkg_status:
return self.STALE
elif self.PARTIAL in pkg_status:
return self.PARTIAL
elif self.INSTALLED in pkg_status and self.NOT_INSTALLED in pkg_status:
return self.PARTIAL
elif self.NOT_INSTALLED in pkg_status:
return self.NOT_INSTALLED
else:
return self.INSTALLED
# Handle packages:
else:
filepath = os.path.join(download_dir, info.filename)
if download_dir != self._download_dir:
return self._pkg_status(info, filepath)
else:
if info.id not in self._status_cache:
self._status_cache[info.id] = self._pkg_status(info, filepath)
return self._status_cache[info.id]
def _pkg_status(self, info, filepath):
if not os.path.exists(filepath):
return self.NOT_INSTALLED
# Check if the file has the correct size.
try:
filestat = os.stat(filepath)
except OSError:
return self.NOT_INSTALLED
if filestat.st_size != int(info.size):
return self.STALE
# Check if the file's checksum matches
if md5_hexdigest(filepath) != info.checksum:
return self.STALE
# If it's a zipfile, and it's been at least partially
# unzipped, then check if it's been fully unzipped.
if filepath.endswith(".zip"):
unzipdir = filepath[:-4]
if not os.path.exists(unzipdir):
return self.INSTALLED # but not unzipped -- ok!
if not os.path.isdir(unzipdir):
return self.STALE
unzipped_size = sum(
os.stat(os.path.join(d, f)).st_size
for d, _, files in os.walk(unzipdir)
for f in files
)
if unzipped_size != info.unzipped_size:
return self.STALE
# Otherwise, everything looks good.
return self.INSTALLED
def update(self, quiet=False, prefix="[nltk_data] "):
"""
Re-download any packages whose status is STALE.
"""
self.clear_status_cache()
for pkg in self.packages():
if self.status(pkg) == self.STALE:
self.download(pkg, quiet=quiet, prefix=prefix)
# /////////////////////////////////////////////////////////////////
# Index
# /////////////////////////////////////////////////////////////////
def _update_index(self, url=None):
"""A helper function that ensures that self._index is
up-to-date. If the index is older than self.INDEX_TIMEOUT,
then download it again."""
        # Check if the index is already up-to-date. If so, do nothing.
if not (
self._index is None
or url is not None
or time.time() - self._index_timestamp > self.INDEX_TIMEOUT
):
return
# If a URL was specified, then update our URL.
self._url = url or self._url
# Download the index file.
self._index = nltk.internals.ElementWrapper(
ElementTree.parse(urlopen(self._url)).getroot()
)
self._index_timestamp = time.time()
# Build a dictionary of packages.
packages = [Package.fromxml(p) for p in self._index.findall("packages/package")]
self._packages = dict((p.id, p) for p in packages)
# Build a dictionary of collections.
collections = [
Collection.fromxml(c) for c in self._index.findall("collections/collection")
]
self._collections = dict((c.id, c) for c in collections)
# Replace identifiers with actual children in collection.children.
for collection in self._collections.values():
for i, child_id in enumerate(collection.children):
if child_id in self._packages:
collection.children[i] = self._packages[child_id]
elif child_id in self._collections:
collection.children[i] = self._collections[child_id]
else:
print(
"removing collection member with no package: {}".format(
child_id
)
)
del collection.children[i]
# Fill in collection.packages for each collection.
for collection in self._collections.values():
packages = {}
queue = [collection]
for child in queue:
if isinstance(child, Collection):
queue.extend(child.children)
elif isinstance(child, Package):
packages[child.id] = child
else:
pass
collection.packages = packages.values()
# Flush the status cache
self._status_cache.clear()
def index(self):
"""
Return the XML index describing the packages available from
the data server. If necessary, this index will be downloaded
from the data server.
"""
self._update_index()
return self._index
def info(self, id):
"""Return the ``Package`` or ``Collection`` record for the
given item."""
self._update_index()
if id in self._packages:
return self._packages[id]
if id in self._collections:
return self._collections[id]
raise ValueError("Package %r not found in index" % id)
def xmlinfo(self, id):
"""Return the XML info record for the given item"""
self._update_index()
for package in self._index.findall("packages/package"):
if package.get("id") == id:
return package
for collection in self._index.findall("collections/collection"):
if collection.get("id") == id:
return collection
raise ValueError("Package %r not found in index" % id)
# /////////////////////////////////////////////////////////////////
# URL & Data Directory
# /////////////////////////////////////////////////////////////////
def _get_url(self):
"""The URL for the data server's index file."""
return self._url
def _set_url(self, url):
"""
Set a new URL for the data server. If we're unable to contact
the given url, then the original url is kept.
"""
original_url = self._url
try:
self._update_index(url)
except:
self._url = original_url
raise
url = property(_get_url, _set_url)
def default_download_dir(self):
"""
Return the directory to which packages will be downloaded by
default. This value can be overridden using the constructor,
or on a case-by-case basis using the ``download_dir`` argument when
calling ``download()``.
        On Windows, the default download directory is
        ``%APPDATA%\\nltk_data`` (or ``~/nltk_data`` if the *APPDATA*
        environment variable is not set).
On all other platforms, the default directory is the first of
the following which exists or which can be created with write
permission: ``/usr/share/nltk_data``, ``/usr/local/share/nltk_data``,
``/usr/lib/nltk_data``, ``/usr/local/lib/nltk_data``, ``~/nltk_data``.
"""
# Check if we are on GAE where we cannot write into filesystem.
if "APPENGINE_RUNTIME" in os.environ:
return
# Check if we have sufficient permissions to install in a
# variety of system-wide locations.
for nltkdir in nltk.data.path:
if os.path.exists(nltkdir) and nltk.internals.is_writable(nltkdir):
return nltkdir
# On Windows, use %APPDATA%
if sys.platform == "win32" and "APPDATA" in os.environ:
homedir = os.environ["APPDATA"]
# Otherwise, install in the user's home directory.
else:
homedir = os.path.expanduser("~/")
if homedir == "~/":
raise ValueError("Could not find a default download directory")
# append "nltk_data" to the home directory
return os.path.join(homedir, "nltk_data")
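        # --- Descriptive note (not part of the original module) -----------
        # For a writable location, the method above tries, in order:
        #   1. the first existing, writable directory in nltk.data.path
        #      (e.g. /usr/share/nltk_data on many Unix systems);
        #   2. otherwise %APPDATA%\nltk_data on Windows, or ~/nltk_data
        #      elsewhere (with an early return on Google App Engine).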
def _get_download_dir(self):
"""
The default directory to which packages will be downloaded.
This defaults to the value returned by ``default_download_dir()``.
To override this default on a case-by-case basis, use the
``download_dir`` argument when calling ``download()``.
"""
return self._download_dir
def _set_download_dir(self, download_dir):
self._download_dir = download_dir
# Clear the status cache.
self._status_cache.clear()
download_dir = property(_get_download_dir, _set_download_dir)
# /////////////////////////////////////////////////////////////////
# Interactive Shell
# /////////////////////////////////////////////////////////////////
def _interactive_download(self):
# Try the GUI first; if that doesn't work, try the simple
# interactive shell.
if TKINTER:
try:
DownloaderGUI(self).mainloop()
except TclError:
DownloaderShell(self).run()
else:
DownloaderShell(self).run()
class DownloaderShell(object):
def __init__(self, dataserver):
self._ds = dataserver
def _simple_interactive_menu(self, *options):
print("-" * 75)
spc = (68 - sum(len(o) for o in options)) // (len(options) - 1) * " "
print(" " + spc.join(options))
print("-" * 75)
def run(self):
print("NLTK Downloader")
while True:
self._simple_interactive_menu(
"d) Download",
"l) List",
" u) Update",
"c) Config",
"h) Help",
"q) Quit",
)
user_input = input("Downloader> ").strip()
if not user_input:
print()
continue
command = user_input.lower().split()[0]
args = user_input.split()[1:]
try:
if command == "l":
print()
self._ds.list(self._ds.download_dir, header=False, more_prompt=True)
elif command == "h":
self._simple_interactive_help()
elif command == "c":
self._simple_interactive_config()
elif command in ("q", "x"):
return
elif command == "d":
self._simple_interactive_download(args)
elif command == "u":
self._simple_interactive_update()
else:
print("Command %r unrecognized" % user_input)
except HTTPError as e:
print("Error reading from server: %s" % e)
except URLError as e:
print("Error connecting to server: %s" % e.reason)
# try checking if user_input is a package name, &
# downloading it?
print()
def _simple_interactive_download(self, args):
if args:
for arg in args:
try:
self._ds.download(arg, prefix=" ")
except (IOError, ValueError) as e:
print(e)
else:
while True:
print()
print("Download which package (l=list; x=cancel)?")
user_input = input(" Identifier> ")
if user_input.lower() == "l":
self._ds.list(
self._ds.download_dir,
header=False,
more_prompt=True,
skip_installed=True,
)
continue
elif user_input.lower() in ("x", "q", ""):
return
elif user_input:
for id in user_input.split():
try:
self._ds.download(id, prefix=" ")
except (IOError, ValueError) as e:
print(e)
break
def _simple_interactive_update(self):
while True:
stale_packages = []
stale = partial = False
for info in sorted(getattr(self._ds, "packages")(), key=str):
if self._ds.status(info) == self._ds.STALE:
stale_packages.append((info.id, info.name))
print()
if stale_packages:
print("Will update following packages (o=ok; x=cancel)")
for pid, pname in stale_packages:
name = textwrap.fill(
"-" * 27 + (pname), 75, subsequent_indent=27 * " "
)[27:]
print(" [ ] %s %s" % (pid.ljust(20, "."), name))
print()
user_input = input(" Identifier> ")
if user_input.lower() == "o":
for pid, pname in stale_packages:
try:
self._ds.download(pid, prefix=" ")
except (IOError, ValueError) as e:
print(e)
break
elif user_input.lower() in ("x", "q", ""):
return
else:
print("Nothing to update.")
return
def _simple_interactive_help(self):
print()
print("Commands:")
print(
" d) Download a package or collection u) Update out of date packages"
)
print(" l) List packages & collections h) Help")
print(" c) View & Modify Configuration q) Quit")
def _show_config(self):
print()
print("Data Server:")
print(" - URL: <%s>" % self._ds.url)
print((" - %d Package Collections Available" % len(self._ds.collections())))
print((" - %d Individual Packages Available" % len(self._ds.packages())))
print()
print("Local Machine:")
print(" - Data directory: %s" % self._ds.download_dir)
def _simple_interactive_config(self):
self._show_config()
while True:
print()
self._simple_interactive_menu(
"s) Show Config", "u) Set Server URL", "d) Set Data Dir", "m) Main Menu"
)
user_input = input("Config> ").strip().lower()
if user_input == "s":
self._show_config()
elif user_input == "d":
new_dl_dir = input(" New Directory> ").strip()
if new_dl_dir in ("", "x", "q", "X", "Q"):
print(" Cancelled!")
elif os.path.isdir(new_dl_dir):
self._ds.download_dir = new_dl_dir
else:
print(("Directory %r not found! Create it first." % new_dl_dir))
elif user_input == "u":
new_url = input(" New URL> ").strip()
if new_url in ("", "x", "q", "X", "Q"):
print(" Cancelled!")
else:
if not new_url.startswith(("http://", "https://")):
new_url = "http://" + new_url
try:
self._ds.url = new_url
except Exception as e:
print("Error reading <%r>:\n %s" % (new_url, e))
elif user_input == "m":
break
class DownloaderGUI(object):
"""
Graphical interface for downloading packages from the NLTK data
server.
"""
# /////////////////////////////////////////////////////////////////
# Column Configuration
# /////////////////////////////////////////////////////////////////
COLUMNS = [
"",
"Identifier",
"Name",
"Size",
"Status",
"Unzipped Size",
"Copyright",
"Contact",
"License",
"Author",
"Subdir",
"Checksum",
]
"""A list of the names of columns. This controls the order in
which the columns will appear. If this is edited, then
``_package_to_columns()`` may need to be edited to match."""
COLUMN_WEIGHTS = {"": 0, "Name": 5, "Size": 0, "Status": 0}
"""A dictionary specifying how columns should be resized when the
table is resized. Columns with weight 0 will not be resized at
all; and columns with high weight will be resized more.
Default weight (for columns not explicitly listed) is 1."""
COLUMN_WIDTHS = {
"": 1,
"Identifier": 20,
"Name": 45,
"Size": 10,
"Unzipped Size": 10,
"Status": 12,
}
"""A dictionary specifying how wide each column should be, in
characters. The default width (for columns not explicitly
listed) is specified by ``DEFAULT_COLUMN_WIDTH``."""
DEFAULT_COLUMN_WIDTH = 30
"""The default width for columns that are not explicitly listed
in ``COLUMN_WIDTHS``."""
INITIAL_COLUMNS = ["", "Identifier", "Name", "Size", "Status"]
"""The set of columns that should be displayed by default."""
# Perform a few import-time sanity checks to make sure that the
# column configuration variables are defined consistently:
for c in COLUMN_WEIGHTS:
assert c in COLUMNS
for c in COLUMN_WIDTHS:
assert c in COLUMNS
for c in INITIAL_COLUMNS:
assert c in COLUMNS
# /////////////////////////////////////////////////////////////////
# Color Configuration
# /////////////////////////////////////////////////////////////////
_BACKDROP_COLOR = ("#000", "#ccc")
_ROW_COLOR = {
Downloader.INSTALLED: ("#afa", "#080"),
Downloader.PARTIAL: ("#ffa", "#880"),
Downloader.STALE: ("#faa", "#800"),
Downloader.NOT_INSTALLED: ("#fff", "#888"),
}
_MARK_COLOR = ("#000", "#ccc")
# _FRONT_TAB_COLOR = ('#ccf', '#008')
# _BACK_TAB_COLOR = ('#88a', '#448')
_FRONT_TAB_COLOR = ("#fff", "#45c")
_BACK_TAB_COLOR = ("#aaa", "#67a")
_PROGRESS_COLOR = ("#f00", "#aaa")
_TAB_FONT = "helvetica -16 bold"
# /////////////////////////////////////////////////////////////////
# Constructor
# /////////////////////////////////////////////////////////////////
def __init__(self, dataserver, use_threads=True):
self._ds = dataserver
self._use_threads = use_threads
# For the threaded downloader:
self._download_lock = threading.Lock()
self._download_msg_queue = []
self._download_abort_queue = []
self._downloading = False
# For tkinter after callbacks:
self._afterid = {}
# A message log.
self._log_messages = []
self._log_indent = 0
self._log("NLTK Downloader Started!")
# Create the main window.
top = self.top = Tk()
top.geometry("+50+50")
top.title("NLTK Downloader")
top.configure(background=self._BACKDROP_COLOR[1])
# Set up some bindings now, in case anything goes wrong.
top.bind("<Control-q>", self.destroy)
top.bind("<Control-x>", self.destroy)
self._destroyed = False
self._column_vars = {}
# Initialize the GUI.
self._init_widgets()
self._init_menu()
try:
self._fill_table()
except HTTPError as e:
showerror("Error reading from server", e)
except URLError as e:
showerror("Error connecting to server", e.reason)
self._show_info()
self._select_columns()
self._table.select(0)
# Make sure we get notified when we're destroyed, so we can
# cancel any download in progress.
self._table.bind("<Destroy>", self._destroy)
def _log(self, msg):
self._log_messages.append(
"%s %s%s" % (time.ctime(), " | " * self._log_indent, msg)
)
# /////////////////////////////////////////////////////////////////
# Internals
# /////////////////////////////////////////////////////////////////
def _init_widgets(self):
# Create the top-level frame structures
f1 = Frame(self.top, relief="raised", border=2, padx=8, pady=0)
        f1.pack(side="top", expand=True, fill="both")
f1.grid_rowconfigure(2, weight=1)
f1.grid_columnconfigure(0, weight=1)
Frame(f1, height=8).grid(column=0, row=0) # spacer
tabframe = Frame(f1)
tabframe.grid(column=0, row=1, sticky="news")
tableframe = Frame(f1)
tableframe.grid(column=0, row=2, sticky="news")
buttonframe = Frame(f1)
buttonframe.grid(column=0, row=3, sticky="news")
Frame(f1, height=8).grid(column=0, row=4) # spacer
infoframe = Frame(f1)
infoframe.grid(column=0, row=5, sticky="news")
Frame(f1, height=8).grid(column=0, row=6) # spacer
progressframe = Frame(
self.top, padx=3, pady=3, background=self._BACKDROP_COLOR[1]
)
progressframe.pack(side="bottom", fill="x")
self.top["border"] = 0
self.top["highlightthickness"] = 0
# Create the tabs
self._tab_names = ["Collections", "Corpora", "Models", "All Packages"]
self._tabs = {}
for i, tab in enumerate(self._tab_names):
label = Label(tabframe, text=tab, font=self._TAB_FONT)
label.pack(side="left", padx=((i + 1) % 2) * 10)
label.bind("<Button-1>", self._select_tab)
self._tabs[tab.lower()] = label
# Create the table.
column_weights = [self.COLUMN_WEIGHTS.get(column, 1) for column in self.COLUMNS]
self._table = Table(
tableframe,
self.COLUMNS,
column_weights=column_weights,
highlightthickness=0,
listbox_height=16,
reprfunc=self._table_reprfunc,
)
self._table.columnconfig(0, foreground=self._MARK_COLOR[0]) # marked
for i, column in enumerate(self.COLUMNS):
width = self.COLUMN_WIDTHS.get(column, self.DEFAULT_COLUMN_WIDTH)
self._table.columnconfig(i, width=width)
self._table.pack(expand=True, fill="both")
self._table.focus()
self._table.bind_to_listboxes("<Double-Button-1>", self._download)
self._table.bind("<space>", self._table_mark)
self._table.bind("<Return>", self._download)
self._table.bind("<Left>", self._prev_tab)
self._table.bind("<Right>", self._next_tab)
self._table.bind("<Control-a>", self._mark_all)
# Create entry boxes for URL & download_dir
infoframe.grid_columnconfigure(1, weight=1)
info = [
("url", "Server Index:", self._set_url),
("download_dir", "Download Directory:", self._set_download_dir),
]
self._info = {}
for (i, (key, label, callback)) in enumerate(info):
Label(infoframe, text=label).grid(column=0, row=i, sticky="e")
entry = Entry(
infoframe, font="courier", relief="groove", disabledforeground="black"
)
self._info[key] = (entry, callback)
entry.bind("<Return>", self._info_save)
entry.bind("<Button-1>", lambda e, key=key: self._info_edit(key))
entry.grid(column=1, row=i, sticky="ew")
# If the user edits url or download_dir, and then clicks outside
# the entry box, then save their results.
self.top.bind("<Button-1>", self._info_save)
# Create Download & Refresh buttons.
self._download_button = Button(
buttonframe, text="Download", command=self._download, width=8
)
self._download_button.pack(side="left")
self._refresh_button = Button(
buttonframe, text="Refresh", command=self._refresh, width=8
)
self._refresh_button.pack(side="right")
# Create Progress bar
self._progresslabel = Label(
progressframe,
text="",
foreground=self._BACKDROP_COLOR[0],
background=self._BACKDROP_COLOR[1],
)
self._progressbar = Canvas(
progressframe,
width=200,
height=16,
background=self._PROGRESS_COLOR[1],
relief="sunken",
border=1,
)
self._init_progressbar()
self._progressbar.pack(side="right")
self._progresslabel.pack(side="left")
def _init_menu(self):
menubar = Menu(self.top)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(
label="Download", underline=0, command=self._download, accelerator="Return"
)
filemenu.add_separator()
filemenu.add_command(
label="Change Server Index",
underline=7,
command=lambda: self._info_edit("url"),
)
filemenu.add_command(
label="Change Download Directory",
underline=0,
command=lambda: self._info_edit("download_dir"),
)
filemenu.add_separator()
filemenu.add_command(label="Show Log", underline=5, command=self._show_log)
filemenu.add_separator()
filemenu.add_command(
label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x"
)
menubar.add_cascade(label="File", underline=0, menu=filemenu)
# Create a menu to control which columns of the table are
# shown. n.b.: we never hide the first two columns (mark and
# identifier).
viewmenu = Menu(menubar, tearoff=0)
for column in self._table.column_names[2:]:
var = IntVar(self.top)
assert column not in self._column_vars
self._column_vars[column] = var
if column in self.INITIAL_COLUMNS:
var.set(1)
viewmenu.add_checkbutton(
label=column, underline=0, variable=var, command=self._select_columns
)
menubar.add_cascade(label="View", underline=0, menu=viewmenu)
# Create a sort menu
# [xx] this should be selectbuttons; and it should include
# reversed sorts as options.
sortmenu = Menu(menubar, tearoff=0)
for column in self._table.column_names[1:]:
sortmenu.add_command(
label="Sort by %s" % column,
command=(lambda c=column: self._table.sort_by(c, "ascending")),
)
sortmenu.add_separator()
# sortmenu.add_command(label='Descending Sort:')
for column in self._table.column_names[1:]:
sortmenu.add_command(
label="Reverse sort by %s" % column,
command=(lambda c=column: self._table.sort_by(c, "descending")),
)
menubar.add_cascade(label="Sort", underline=0, menu=sortmenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="About", underline=0, command=self.about)
helpmenu.add_command(
label="Instructions", underline=0, command=self.help, accelerator="F1"
)
menubar.add_cascade(label="Help", underline=0, menu=helpmenu)
self.top.bind("<F1>", self.help)
self.top.config(menu=menubar)
def _select_columns(self):
for (column, var) in self._column_vars.items():
if var.get():
self._table.show_column(column)
else:
self._table.hide_column(column)
def _refresh(self):
self._ds.clear_status_cache()
try:
self._fill_table()
except HTTPError as e:
showerror("Error reading from server", e)
except URLError as e:
showerror("Error connecting to server", e.reason)
self._table.select(0)
def _info_edit(self, info_key):
self._info_save() # just in case.
(entry, callback) = self._info[info_key]
entry["state"] = "normal"
entry["relief"] = "sunken"
entry.focus()
def _info_save(self, e=None):
focus = self._table
for entry, callback in self._info.values():
if entry["state"] == "disabled":
continue
if e is not None and e.widget is entry and e.keysym != "Return":
focus = entry
else:
entry["state"] = "disabled"
entry["relief"] = "groove"
callback(entry.get())
focus.focus()
def _table_reprfunc(self, row, col, val):
if self._table.column_names[col].endswith("Size"):
if isinstance(val, str):
return " %s" % val
elif val < 1024 ** 2:
return " %.1f KB" % (val / 1024.0 ** 1)
elif val < 1024 ** 3:
return " %.1f MB" % (val / 1024.0 ** 2)
else:
return " %.1f GB" % (val / 1024.0 ** 3)
if col in (0, ""):
return str(val)
else:
return " %s" % val
def _set_url(self, url):
if url == self._ds.url:
return
try:
self._ds.url = url
self._fill_table()
except IOError as e:
showerror("Error Setting Server Index", str(e))
self._show_info()
def _set_download_dir(self, download_dir):
if self._ds.download_dir == download_dir:
return
# check if the dir exists, and if not, ask if we should create it?
# Clear our status cache, & re-check what's installed
self._ds.download_dir = download_dir
try:
self._fill_table()
except HTTPError as e:
showerror("Error reading from server", e)
except URLError as e:
showerror("Error connecting to server", e.reason)
self._show_info()
def _show_info(self):
print("showing info", self._ds.url)
for entry, cb in self._info.values():
entry["state"] = "normal"
entry.delete(0, "end")
self._info["url"][0].insert(0, self._ds.url)
self._info["download_dir"][0].insert(0, self._ds.download_dir)
for entry, cb in self._info.values():
entry["state"] = "disabled"
def _prev_tab(self, *e):
for i, tab in enumerate(self._tab_names):
if tab.lower() == self._tab and i > 0:
self._tab = self._tab_names[i - 1].lower()
try:
return self._fill_table()
except HTTPError as e:
showerror("Error reading from server", e)
except URLError as e:
showerror("Error connecting to server", e.reason)
def _next_tab(self, *e):
for i, tab in enumerate(self._tab_names):
if tab.lower() == self._tab and i < (len(self._tabs) - 1):
self._tab = self._tab_names[i + 1].lower()
try:
return self._fill_table()
except HTTPError as e:
showerror("Error reading from server", e)
except URLError as e:
showerror("Error connecting to server", e.reason)
def _select_tab(self, event):
self._tab = event.widget["text"].lower()
try:
self._fill_table()
except HTTPError as e:
showerror("Error reading from server", e)
except URLError as e:
showerror("Error connecting to server", e.reason)
_tab = "collections"
# _tab = 'corpora'
_rows = None
def _fill_table(self):
selected_row = self._table.selected_row()
self._table.clear()
if self._tab == "all packages":
items = self._ds.packages()
elif self._tab == "corpora":
items = self._ds.corpora()
elif self._tab == "models":
items = self._ds.models()
elif self._tab == "collections":
items = self._ds.collections()
else:
assert 0, "bad tab value %r" % self._tab
rows = [self._package_to_columns(item) for item in items]
self._table.extend(rows)
# Highlight the active tab.
for tab, label in self._tabs.items():
if tab == self._tab:
label.configure(
foreground=self._FRONT_TAB_COLOR[0],
background=self._FRONT_TAB_COLOR[1],
)
else:
label.configure(
foreground=self._BACK_TAB_COLOR[0],
background=self._BACK_TAB_COLOR[1],
)
self._table.sort_by("Identifier", order="ascending")
self._color_table()
self._table.select(selected_row)
# This is a hack, because the scrollbar isn't updating its
# position right -- I'm not sure what the underlying cause is
# though. (This is on OS X w/ python 2.5) The length of
# delay that's necessary seems to depend on how fast the
# computer is. :-/
self.top.after(150, self._table._scrollbar.set, *self._table._mlb.yview())
self.top.after(300, self._table._scrollbar.set, *self._table._mlb.yview())
def _update_table_status(self):
for row_num in range(len(self._table)):
status = self._ds.status(self._table[row_num, "Identifier"])
self._table[row_num, "Status"] = status
self._color_table()
def _download(self, *e):
# If we're using threads, then delegate to the threaded
# downloader instead.
if self._use_threads:
return self._download_threaded(*e)
marked = [
self._table[row, "Identifier"]
for row in range(len(self._table))
if self._table[row, 0] != ""
]
selection = self._table.selected_row()
if not marked and selection is not None:
marked = [self._table[selection, "Identifier"]]
download_iter = self._ds.incr_download(marked, self._ds.download_dir)
self._log_indent = 0
self._download_cb(download_iter, marked)
_DL_DELAY = 10
def _download_cb(self, download_iter, ids):
try:
msg = next(download_iter)
except StopIteration:
# self._fill_table(sort=False)
self._update_table_status()
afterid = self.top.after(10, self._show_progress, 0)
self._afterid["_download_cb"] = afterid
return
def show(s):
self._progresslabel["text"] = s
self._log(s)
if isinstance(msg, ProgressMessage):
self._show_progress(msg.progress)
elif isinstance(msg, ErrorMessage):
show(msg.message)
if msg.package is not None:
self._select(msg.package.id)
self._show_progress(None)
return # halt progress.
elif isinstance(msg, StartCollectionMessage):
show("Downloading collection %s" % msg.collection.id)
self._log_indent += 1
elif isinstance(msg, StartPackageMessage):
show("Downloading package %s" % msg.package.id)
elif isinstance(msg, UpToDateMessage):
show("Package %s is up-to-date!" % msg.package.id)
# elif isinstance(msg, StaleMessage):
# show('Package %s is out-of-date or corrupt' % msg.package.id)
elif isinstance(msg, FinishDownloadMessage):
show("Finished downloading %r." % msg.package.id)
elif isinstance(msg, StartUnzipMessage):
show("Unzipping %s" % msg.package.filename)
elif isinstance(msg, FinishCollectionMessage):
self._log_indent -= 1
show("Finished downloading collection %r." % msg.collection.id)
self._clear_mark(msg.collection.id)
elif isinstance(msg, FinishPackageMessage):
self._clear_mark(msg.package.id)
afterid = self.top.after(self._DL_DELAY, self._download_cb, download_iter, ids)
self._afterid["_download_cb"] = afterid
def _select(self, id):
for row in range(len(self._table)):
if self._table[row, "Identifier"] == id:
self._table.select(row)
return
def _color_table(self):
# Color rows according to status.
for row in range(len(self._table)):
bg, sbg = self._ROW_COLOR[self._table[row, "Status"]]
fg, sfg = ("black", "white")
self._table.rowconfig(
row,
foreground=fg,
selectforeground=sfg,
background=bg,
selectbackground=sbg,
)
# Color the marked column
self._table.itemconfigure(
row, 0, foreground=self._MARK_COLOR[0], background=self._MARK_COLOR[1]
)
def _clear_mark(self, id):
for row in range(len(self._table)):
if self._table[row, "Identifier"] == id:
self._table[row, 0] = ""
def _mark_all(self, *e):
for row in range(len(self._table)):
self._table[row, 0] = "X"
def _table_mark(self, *e):
selection = self._table.selected_row()
if selection >= 0:
if self._table[selection][0] != "":
self._table[selection, 0] = ""
else:
self._table[selection, 0] = "X"
self._table.select(delta=1)
def _show_log(self):
text = "\n".join(self._log_messages)
ShowText(self.top, "NLTK Downloader Log", text)
def _package_to_columns(self, pkg):
"""
Given a package, return a list of values describing that
package, one for each column in ``self.COLUMNS``.
"""
row = []
for column_index, column_name in enumerate(self.COLUMNS):
if column_index == 0: # Mark:
row.append("")
elif column_name == "Identifier":
row.append(pkg.id)
elif column_name == "Status":
row.append(self._ds.status(pkg))
else:
attr = column_name.lower().replace(" ", "_")
row.append(getattr(pkg, attr, "n/a"))
return row
# /////////////////////////////////////////////////////////////////
# External Interface
# /////////////////////////////////////////////////////////////////
def destroy(self, *e):
if self._destroyed:
return
self.top.destroy()
self._destroyed = True
def _destroy(self, *e):
if self.top is not None:
for afterid in self._afterid.values():
self.top.after_cancel(afterid)
# Abort any download in progress.
if self._downloading and self._use_threads:
self._abort_download()
# Make sure the garbage collector destroys these now;
# otherwise, they may get destroyed when we're not in the main
# thread, which would make Tkinter unhappy.
self._column_vars.clear()
def mainloop(self, *args, **kwargs):
self.top.mainloop(*args, **kwargs)
# /////////////////////////////////////////////////////////////////
# HELP
# /////////////////////////////////////////////////////////////////
HELP = textwrap.dedent(
"""\
This tool can be used to download a variety of corpora and models
that can be used with NLTK. Each corpus or model is distributed
in a single zip file, known as a \"package file.\" You can
download packages individually, or you can download pre-defined
collections of packages.
When you download a package, it will be saved to the \"download
directory.\" A default download directory is chosen when you run
the downloader; but you may also select a different download
directory. On Windows, the default download directory is
\"package.\"
The NLTK downloader can be used to download a variety of corpora,
models, and other data packages.
Keyboard shortcuts::
[return]\t Download
[up]\t Select previous package
[down]\t Select next package
[left]\t Select previous tab
[right]\t Select next tab
"""
)
def help(self, *e):
# The default font's not very legible; try using 'fixed' instead.
try:
ShowText(
self.top,
"Help: NLTK Dowloader",
self.HELP.strip(),
width=75,
font="fixed",
)
except:
ShowText(self.top, "Help: NLTK Downloader", self.HELP.strip(), width=75)
def about(self, *e):
ABOUT = "NLTK Downloader\n" + "Written by Edward Loper"
TITLE = "About: NLTK Downloader"
try:
from tkinter.messagebox import Message
Message(message=ABOUT, title=TITLE).show()
except ImportError:
ShowText(self.top, TITLE, ABOUT)
# /////////////////////////////////////////////////////////////////
# Progress Bar
# /////////////////////////////////////////////////////////////////
_gradient_width = 5
def _init_progressbar(self):
c = self._progressbar
width, height = int(c["width"]), int(c["height"])
for i in range(0, (int(c["width"]) * 2) // self._gradient_width):
c.create_line(
i * self._gradient_width + 20,
-20,
i * self._gradient_width - height - 20,
height + 20,
width=self._gradient_width,
fill="#%02x0000" % (80 + abs(i % 6 - 3) * 12),
)
c.addtag_all("gradient")
c.itemconfig("gradient", state="hidden")
# This is used to display progress
c.addtag_withtag(
"redbox", c.create_rectangle(0, 0, 0, 0, fill=self._PROGRESS_COLOR[0])
)
def _show_progress(self, percent):
c = self._progressbar
if percent is None:
c.coords("redbox", 0, 0, 0, 0)
c.itemconfig("gradient", state="hidden")
else:
width, height = int(c["width"]), int(c["height"])
x = percent * int(width) // 100 + 1
c.coords("redbox", 0, 0, x, height + 1)
def _progress_alive(self):
c = self._progressbar
if not self._downloading:
c.itemconfig("gradient", state="hidden")
else:
c.itemconfig("gradient", state="normal")
x1, y1, x2, y2 = c.bbox("gradient")
if x1 <= -100:
c.move("gradient", (self._gradient_width * 6) - 4, 0)
else:
c.move("gradient", -4, 0)
afterid = self.top.after(200, self._progress_alive)
self._afterid["_progress_alive"] = afterid
# /////////////////////////////////////////////////////////////////
# Threaded downloader
# /////////////////////////////////////////////////////////////////
def _download_threaded(self, *e):
# If the user tries to start a new download while we're already
# downloading something, then abort the current download instead.
if self._downloading:
self._abort_download()
return
# Change the 'download' button to an 'abort' button.
self._download_button["text"] = "Cancel"
marked = [
self._table[row, "Identifier"]
for row in range(len(self._table))
if self._table[row, 0] != ""
]
selection = self._table.selected_row()
if not marked and selection is not None:
marked = [self._table[selection, "Identifier"]]
# Create a new data server object for the download operation,
# just in case the user modifies our data server during the
# download (e.g., clicking 'refresh' or editing the index url).
ds = Downloader(self._ds.url, self._ds.download_dir)
# Start downloading in a separate thread.
assert self._download_msg_queue == []
assert self._download_abort_queue == []
self._DownloadThread(
ds,
marked,
self._download_lock,
self._download_msg_queue,
self._download_abort_queue,
).start()
# Monitor the download message queue & display its progress.
self._log_indent = 0
self._downloading = True
self._monitor_message_queue()
# Display an indication that we're still alive and well by
# cycling the progress bar.
self._progress_alive()
def _abort_download(self):
if self._downloading:
self._download_lock.acquire()
self._download_abort_queue.append("abort")
self._download_lock.release()
class _DownloadThread(threading.Thread):
def __init__(self, data_server, items, lock, message_queue, abort):
self.data_server = data_server
self.items = items
self.lock = lock
self.message_queue = message_queue
self.abort = abort
threading.Thread.__init__(self)
def run(self):
for msg in self.data_server.incr_download(self.items):
self.lock.acquire()
self.message_queue.append(msg)
# Check if we've been told to kill ourselves:
if self.abort:
self.message_queue.append("aborted")
self.lock.release()
return
self.lock.release()
self.lock.acquire()
self.message_queue.append("finished")
self.lock.release()
_MONITOR_QUEUE_DELAY = 100
def _monitor_message_queue(self):
def show(s):
self._progresslabel["text"] = s
self._log(s)
# Try to acquire the lock; if it's busy, then just try again later.
if not self._download_lock.acquire():
return
for msg in self._download_msg_queue:
# Done downloading?
if msg == "finished" or msg == "aborted":
# self._fill_table(sort=False)
self._update_table_status()
self._downloading = False
self._download_button["text"] = "Download"
del self._download_msg_queue[:]
del self._download_abort_queue[:]
self._download_lock.release()
if msg == "aborted":
show("Download aborted!")
self._show_progress(None)
else:
afterid = self.top.after(100, self._show_progress, None)
self._afterid["_monitor_message_queue"] = afterid
return
# All other messages
elif isinstance(msg, ProgressMessage):
self._show_progress(msg.progress)
elif isinstance(msg, ErrorMessage):
show(msg.message)
if msg.package is not None:
self._select(msg.package.id)
self._show_progress(None)
self._downloading = False
return # halt progress.
elif isinstance(msg, StartCollectionMessage):
show("Downloading collection %r" % msg.collection.id)
self._log_indent += 1
elif isinstance(msg, StartPackageMessage):
self._ds.clear_status_cache(msg.package.id)
show("Downloading package %r" % msg.package.id)
elif isinstance(msg, UpToDateMessage):
show("Package %s is up-to-date!" % msg.package.id)
# elif isinstance(msg, StaleMessage):
# show('Package %s is out-of-date or corrupt; updating it' %
# msg.package.id)
elif isinstance(msg, FinishDownloadMessage):
show("Finished downloading %r." % msg.package.id)
elif isinstance(msg, StartUnzipMessage):
show("Unzipping %s" % msg.package.filename)
elif isinstance(msg, FinishUnzipMessage):
show("Finished installing %s" % msg.package.id)
elif isinstance(msg, FinishCollectionMessage):
self._log_indent -= 1
show("Finished downloading collection %r." % msg.collection.id)
self._clear_mark(msg.collection.id)
elif isinstance(msg, FinishPackageMessage):
self._update_table_status()
self._clear_mark(msg.package.id)
# Let the user know when we're aborting a download (but
# waiting for a good point to abort it, so we don't end up
# with a partially unzipped package or anything like that).
if self._download_abort_queue:
self._progresslabel["text"] = "Aborting download..."
# Clear the message queue and then release the lock
del self._download_msg_queue[:]
self._download_lock.release()
# Check the queue again after MONITOR_QUEUE_DELAY msec.
afterid = self.top.after(self._MONITOR_QUEUE_DELAY, self._monitor_message_queue)
self._afterid["_monitor_message_queue"] = afterid
######################################################################
# Helper Functions
######################################################################
# [xx] It may make sense to move these to nltk.internals.
def md5_hexdigest(file):
"""
Calculate and return the MD5 checksum for a given file.
``file`` may either be a filename or an open stream.
"""
if isinstance(file, str):
with open(file, "rb") as infile:
return _md5_hexdigest(infile)
return _md5_hexdigest(file)
def _md5_hexdigest(fp):
md5_digest = md5()
while True:
block = fp.read(1024 * 16) # 16k blocks
if not block:
break
md5_digest.update(block)
return md5_digest.hexdigest()
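# --------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): verify a
# downloaded package file against the checksum recorded in the server
# index.  The path and digest below are hypothetical placeholders.
def _example_verify_checksum(path="corpora/abc.zip",
                             expected="0123456789abcdef0123456789abcdef"):
    """Return True if the file's MD5 digest matches ``expected``."""
    return md5_hexdigest(path) == expected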
# change this to periodically yield progress messages?
# [xx] get rid of topdir parameter -- we should be checking
# this when we build the index, anyway.
def unzip(filename, root, verbose=True):
"""
Extract the contents of the zip file ``filename`` into the
directory ``root``.
"""
for message in _unzip_iter(filename, root, verbose):
if isinstance(message, ErrorMessage):
raise Exception(message)
def _unzip_iter(filename, root, verbose=True):
if verbose:
sys.stdout.write("Unzipping %s" % os.path.split(filename)[1])
sys.stdout.flush()
try:
zf = zipfile.ZipFile(filename)
except zipfile.error as e:
yield ErrorMessage(filename, "Error with downloaded zip file")
return
except Exception as e:
yield ErrorMessage(filename, e)
return
zf.extractall(root)
if verbose:
print()
######################################################################
# Index Builder
######################################################################
# This may move to a different file sometime.
def build_index(root, base_url):
"""
Create a new data.xml index file, by combining the xml description
files for various packages and collections. ``root`` should be the
path to a directory containing the package xml and zip files; and
the collection xml files. The ``root`` directory is expected to
have the following subdirectories::
root/
packages/ .................. subdirectory for packages
corpora/ ................. zip & xml files for corpora
grammars/ ................ zip & xml files for grammars
taggers/ ................. zip & xml files for taggers
tokenizers/ .............. zip & xml files for tokenizers
etc.
collections/ ............... xml files for collections
For each package, there should be two files: ``package.zip``
(where *package* is the package name)
which contains the package itself as a compressed zip file; and
``package.xml``, which is an xml description of the package. The
zipfile ``package.zip`` should expand to a single subdirectory
named ``package/``. The base filename ``package`` must match
the identifier given in the package's xml file.
For each collection, there should be a single file ``collection.zip``
describing the collection, where *collection* is the name of the collection.
All identifiers (for both packages and collections) must be unique.
"""
# Find all packages.
packages = []
for pkg_xml, zf, subdir in _find_packages(os.path.join(root, "packages")):
zipstat = os.stat(zf.filename)
url = "%s/%s/%s" % (base_url, subdir, os.path.split(zf.filename)[1])
unzipped_size = sum(zf_info.file_size for zf_info in zf.infolist())
# Fill in several fields of the package xml with calculated values.
pkg_xml.set("unzipped_size", "%s" % unzipped_size)
pkg_xml.set("size", "%s" % zipstat.st_size)
pkg_xml.set("checksum", "%s" % md5_hexdigest(zf.filename))
pkg_xml.set("subdir", subdir)
# pkg_xml.set('svn_revision', _svn_revision(zf.filename))
if not pkg_xml.get("url"):
pkg_xml.set("url", url)
# Record the package.
packages.append(pkg_xml)
# Find all collections
collections = list(_find_collections(os.path.join(root, "collections")))
# Check that all UIDs are unique
uids = set()
for item in packages + collections:
if item.get("id") in uids:
raise ValueError("Duplicate UID: %s" % item.get("id"))
uids.add(item.get("id"))
# Put it all together
top_elt = ElementTree.Element("nltk_data")
top_elt.append(ElementTree.Element("packages"))
for package in packages:
top_elt[0].append(package)
top_elt.append(ElementTree.Element("collections"))
for collection in collections:
top_elt[1].append(collection)
_indent_xml(top_elt)
return top_elt
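# Illustrative sketch (not part of the original module): serialize the element
# returned by build_index() to an index file that a download server can serve.
# The root path and base URL are hypothetical placeholders.
def _example_write_index(root="/data/nltk_data_src",
                         base_url="https://example.org/nltk_data"):
    top_elt = build_index(root, base_url)
    ElementTree.ElementTree(top_elt).write(os.path.join(root, "index.xml"))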
def _indent_xml(xml, prefix=""):
"""
Helper for ``build_index()``: Given an XML ``ElementTree``, modify its
(and its descendants') ``text`` and ``tail`` attributes to generate
an indented tree, where each nested element is indented by 2
spaces with respect to its parent.
"""
if len(xml) > 0:
xml.text = (xml.text or "").strip() + "\n" + prefix + " "
for child in xml:
_indent_xml(child, prefix + " ")
for child in xml[:-1]:
child.tail = (child.tail or "").strip() + "\n" + prefix + " "
xml[-1].tail = (xml[-1].tail or "").strip() + "\n" + prefix
def _check_package(pkg_xml, zipfilename, zf):
"""
Helper for ``build_index()``: Perform some checks to make sure that
the given package is consistent.
"""
# The filename must match the id given in the XML file.
uid = os.path.splitext(os.path.split(zipfilename)[1])[0]
if pkg_xml.get("id") != uid:
raise ValueError(
"package identifier mismatch (%s vs %s)" % (pkg_xml.get("id"), uid)
)
# Zip file must expand to a subdir whose name matches uid.
if sum((name != uid and not name.startswith(uid + "/")) for name in zf.namelist()):
raise ValueError(
"Zipfile %s.zip does not expand to a single "
"subdirectory %s/" % (uid, uid)
)
# update for git?
def _svn_revision(filename):
"""
Helper for ``build_index()``: Calculate the subversion revision
number for a given file (by using ``subprocess`` to run ``svn``).
"""
p = subprocess.Popen(
["svn", "status", "-v", filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(stdout, stderr) = p.communicate()
if p.returncode != 0 or stderr or not stdout:
raise ValueError(
"Error determining svn_revision for %s: %s"
% (os.path.split(filename)[1], textwrap.fill(stderr))
)
return stdout.split()[2]
def _find_collections(root):
"""
Helper for ``build_index()``: Yield a list of ElementTree.Element
objects, each holding the xml for a single package collection.
"""
packages = []
for dirname, subdirs, files in os.walk(root):
for filename in files:
if filename.endswith(".xml"):
xmlfile = os.path.join(dirname, filename)
yield ElementTree.parse(xmlfile).getroot()
def _find_packages(root):
"""
Helper for ``build_index()``: Yield a list of tuples
``(pkg_xml, zf, subdir)``, where:
- ``pkg_xml`` is an ``ElementTree.Element`` holding the xml for a
package
- ``zf`` is a ``zipfile.ZipFile`` for the package's contents.
- ``subdir`` is the subdirectory (relative to ``root``) where
the package was found (e.g. 'corpora' or 'grammars').
"""
from nltk.corpus.reader.util import _path_from
# Find all packages.
packages = []
for dirname, subdirs, files in os.walk(root):
relpath = "/".join(_path_from(root, dirname))
for filename in files:
if filename.endswith(".xml"):
xmlfilename = os.path.join(dirname, filename)
zipfilename = xmlfilename[:-4] + ".zip"
try:
zf = zipfile.ZipFile(zipfilename)
except Exception as e:
raise ValueError("Error reading file %r!\n%s" % (zipfilename, e))
try:
pkg_xml = ElementTree.parse(xmlfilename).getroot()
except Exception as e:
raise ValueError("Error reading file %r!\n%s" % (xmlfilename, e))
# Check that the UID matches the filename
uid = os.path.split(xmlfilename[:-4])[1]
if pkg_xml.get("id") != uid:
raise ValueError(
"package identifier mismatch (%s "
"vs %s)" % (pkg_xml.get("id"), uid)
)
# Check that the zipfile expands to a subdir whose
# name matches the uid.
if sum(
(name != uid and not name.startswith(uid + "/"))
for name in zf.namelist()
):
raise ValueError(
"Zipfile %s.zip does not expand to a "
"single subdirectory %s/" % (uid, uid)
)
yield pkg_xml, zf, relpath
# Don't recurse into svn subdirectories:
try:
subdirs.remove(".svn")
except ValueError:
pass
######################################################################
# Main:
######################################################################
# There should be a command-line interface
# Aliases
_downloader = Downloader()
download = _downloader.download
def download_shell():
DownloaderShell(_downloader).run()
def download_gui():
DownloaderGUI(_downloader).mainloop()
def update():
_downloader.update()
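# Illustrative usage sketch (not part of the original module).  'punkt' is just
# an example id; any package or collection listed in the server index works.
def _example_download(download_dir=None):
    download("punkt", download_dir=download_dir, quiet=True)  # one package
    update()  # then refresh any packages that are out of date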
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option(
"-d",
"--dir",
dest="dir",
help="download package to directory DIR",
metavar="DIR",
)
parser.add_option(
"-q",
"--quiet",
dest="quiet",
action="store_true",
default=False,
help="work quietly",
)
parser.add_option(
"-f",
"--force",
dest="force",
action="store_true",
default=False,
help="download even if already installed",
)
parser.add_option(
"-e",
"--exit-on-error",
dest="halt_on_error",
action="store_true",
default=False,
help="exit if an error occurs",
)
parser.add_option(
"-u",
"--url",
dest="server_index_url",
default=os.environ.get("NLTK_DOWNLOAD_URL"),
help="download server index url",
)
(options, args) = parser.parse_args()
downloader = Downloader(server_index_url=options.server_index_url)
if args:
for pkg_id in args:
rv = downloader.download(
info_or_id=pkg_id,
download_dir=options.dir,
quiet=options.quiet,
force=options.force,
halt_on_error=options.halt_on_error,
)
if rv == False and options.halt_on_error:
break
else:
downloader.download(
download_dir=options.dir,
quiet=options.quiet,
force=options.force,
halt_on_error=options.halt_on_error,
)
| avg_line_length: 36.260494 | max_line_length: 88 | alphanum_fraction: 0.55217 |

hexsha: 4a06151399105912117de07d65803a53ec84bb93 | size: 32563 | ext: py | lang: Python
max_stars_repo_path: qe_reader.py | max_stars_repo_name: Paul-St-Young/solid_hydrogen | max_stars_repo_head_hexsha: dd218cd431a283dc1a371a0af5696074d63b8c6c | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2020-08-13T23:32:03.000Z | max_stars_repo_stars_event_max_datetime: 2021-03-28T01:14:06.000Z
max_issues_repo_path: qe_reader.py | max_issues_repo_name: Paul-St-Young/solid_hydrogen | max_issues_repo_head_hexsha: dd218cd431a283dc1a371a0af5696074d63b8c6c | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: qe_reader.py | max_forks_repo_name: Paul-St-Young/solid_hydrogen | max_forks_repo_head_hexsha: dd218cd431a283dc1a371a0af5696074d63b8c6c | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import numpy as np
from mmap import mmap
from qharv.reel.ascii_out import read, name_sep_val, all_lines_with_tag
def read_first_energy(scf_out):
with open(scf_out,'r+') as f:
mm = mmap(f.fileno(),0)
# end with
idx = mm.find(b'!')
mm.seek(idx)
eline = mm.readline().decode()
energy = float( eline.split()[-2] )
return energy
# end def
def read_forces(scf_out,ndim=3,which='total'):
""" read the forces in a pwscf output, assume only one force block
'which' decides which block of forces to read, choices are:
['total', 'non-local', 'local', 'ionic', 'core', 'Hubbard', 'scf']
!!!! assuming QE uses Ry, will convert to Ha """
Ry = 0.5 # Ha
begin_tag_dict = {
'total':'Forces acting on atoms',
'non-local':'The non-local contrib. to forces',
'ionic':'The ionic contribution to forces',
'local':'The local contribution to forces',
'core':'The core correction contribution to forces',
'Hubbard':'The Hubbard contrib. to forces',
'scf':'The SCF correction term to forces'
}
end_tag_dict = {
'total':'The non-local contrib. to forces',
'non-local':'The ionic contribution to forces',
'ionic':'The local contribution to forces',
'local':'The core correction contribution to forces',
'core':'The Hubbard contrib. to forces',
'Hubbard':'The SCF correction term to forces',
'scf':'Total force ='
}
fhandle = open(scf_out,'r+')
mm = mmap(fhandle.fileno(),0)
natom = name_sep_val(mm,'number of atoms',dtype=int)
# locate force block
begin_tag = begin_tag_dict[which]
end_tag = end_tag_dict[which]
begin_idx = mm.find(begin_tag.encode())
end_idx = mm.find(end_tag.encode())
if begin_idx == -1:
raise RuntimeError('cannot locate %s'%begin_tag)
elif end_idx == -1:
# maybe verbosity='low'
end_idx = mm.find(b'Total force =')
if end_idx == -1:
raise RuntimeError('cannot locate %s'%end_tag)
# end if
# end if
force_block = mm[begin_idx:end_idx]
# parse force block for forces
forces = np.zeros([natom,ndim])
iatom = 0
for line in force_block.split(b'\n'):
if line.strip().startswith(b'atom'):
tokens = line.split()
if len(tokens)==9: # found an atom
myforce = np.array(tokens[-3:],dtype=float)
forces[iatom,:] = tokens[-3:]
iatom += 1
# end if
# end if
# end for
if iatom != natom:
raise RuntimeError('found %d forces for %d atoms'%(iatom,natom))
# end if
fhandle.close()
return forces*Ry
# end def
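# Illustrative usage sketch (not part of the original module): read the total
# forces and one of their printed contributions.  The filename is hypothetical;
# units are Ha/bohr after the Ry->Ha conversion performed above.
def example_read_forces(scf_out='scf.out'):
  ftot = read_forces(scf_out, which='total')
  fnl = read_forces(scf_out, which='non-local')
  return ftot, fnl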
def retrieve_occupations(nscf_outfile, max_nbnd_lines=10):
""" read the eigenvalues and occupations of DFT orbitals at every available kpoint in an non-scf output produced by pwscf """
from qharv.reel import ascii_out
span = 7
def scanf_7f(line, n):
""" implement scanf("%7.*f") """
numl = []
for i in range(n):
token = line[span*i:span*(i+1)]
num = float(token)
numl.append(num)
return numl
fhandle = open(nscf_outfile,'r+')
mm = mmap(fhandle.fileno(),0)
# read number of k points
nk_prefix = b"number of k points="
idx = mm.find(nk_prefix)
mm.seek(idx)
nk_line = mm.readline()
nk = int( nk_line.strip(nk_prefix).split()[0] )
# skip to the end of band structure calculation
idx = mm.find(b'End of self-consistent calculation')
idx = mm.find(b'End of band structure calculation')
mm.seek(idx)
# read the eigenvalues and occupations at each kpoint
kpt_prefix = "k ="
data = []
for ik in range(nk):
idx = mm.find(kpt_prefix.encode())
mm.seek(idx)
kpt_line = mm.readline()
kxkykz = ascii_out.lr_mark(kpt_line, '=', '(')
kpt = scanf_7f(kxkykz, 3)
mm.readline() # skip empty line
eval_arr = np.array([])
for iline in range(max_nbnd_lines):
tokens = mm.readline().split()
if len(tokens)==0:
break
# end if
eval_arr = np.append(eval_arr, list(map(float,tokens)))
# end for iline
idx = mm.find(b'occupation numbers')
mm.seek(idx)
mm.readline() # skip current line
occ_arr = np.array([])
for iline in range(100):
tokens = mm.readline().split()
if len(tokens)==0:
break
# end if
occ_arr = np.append(occ_arr, list(map(float,tokens)))
# end for iline
entry = {'ik':ik,'kpt':list(kpt),'eval':list(eval_arr),'occ':list(occ_arr)}
data.append(entry)
# end for
mm.close()
fhandle.close()
return data
# end def
import subprocess as sp
def find_pwscf_io(path,infile_subfix='-scf.in',outfile_subfix='.out',use_last=False):
# assuming there is only 1 pair of pw.x input and output in path
# return the names of the input and output files
out = sp.check_output(['ls',path])
infile = ''
outfile = ''
found_in = False
found_out = False
for fname in out.decode().split('\n')[:-1]:
if fname.endswith(infile_subfix):
if found_in and not use_last:
raise NotImplementedError('multiple inputs found in %s'%path)
# end if
infile = fname
found_in = True
elif fname.endswith(outfile_subfix):
if found_out and not use_last:
raise NotImplementedError('multiple outputs found in %s'%path)
# end if
outfile = fname
found_out = True
# end if
# end for fname
if not found_in:
raise IOError('infile not found in %s'%path)
elif not found_out:
raise IOError('outfile not found in %s'%path)
# end if
return infile,outfile
# end def find_pwscf_io
import struct
def available_structures(pw_out,nstruct_max=10000,natom_max=1000,ndim=3
,variable_cell=False):
""" find all available structures in a pwscf output """
fhandle = open(pw_out,'r+')
mm = mmap(fhandle.fileno(),0)
idx = mm.find(b'lattice parameter')
mm.seek(idx)
lat_line = mm.readline()
alat = float( lat_line.split()[-2] )
# locate all axes
axes_tag = 'CELL_PARAMETERS ('.encode()
axes_starts = all_lines_with_tag(mm,axes_tag,nstruct_max)
naxes = len(axes_starts)
if (naxes != 0) and (not variable_cell):
raise NotImplementedError('CELL_PARAMETERS found, are you sure this is not a variable cell run?')
# end if
# crystal coords
crystal_pos = False
# locate all atomic positions
pos_tag = 'ATOMIC_POSITIONS'.encode()
pos_starts = all_lines_with_tag(mm,pos_tag,nstruct_max)
npos = len(pos_starts)
if variable_cell and (npos != naxes):
raise NotImplementedError('expect same number of cells as atomic positions in a variable cell calculation. got (naxes,npos)=(%d,%d)'%(naxes,npos))
# end if
# count number of atoms
mm.seek(pos_starts[0])
mm.readline() # skip tag line
natom = 0
for iatom in range(natom_max):
line = mm.readline()
tokens = line.split()
if len(tokens) != 4:
break
# end if
natom += 1
# end for iatom
# read initial crystal axes
axes = np.zeros([ndim,ndim])
if not variable_cell:
idx = all_lines_with_tag(mm,'crystal axes'.encode(),nstruct_max)[0]
mm.seek(idx)
tag_line = mm.readline().decode()
unit_text= tag_line.split()[-1].strip('()')
for idim in range(ndim):
line = mm.readline()
axes[idim,:] = line.split()[3:3+ndim]
if 'alat' in unit_text:
axes[idim,:] *= alat
else:
raise NotImplementedError('crystal axes: what unit is %s?'%unit_text)
# end if
# end for
# end if
bohr = 0.52917721067 # angstrom (CODATA 2014)
nstructs = max(naxes,npos)
all_axes = np.zeros([nstructs,ndim,ndim])
all_pos = np.zeros([nstructs,natom,ndim])
for istruct in range(nstructs):
if variable_cell: # read cell parameters
cell_idx = axes_starts[istruct]
mm.seek(cell_idx)
tag_line = mm.readline().decode() # get unit from tag line
axes_unit = tag_line.split('(')[-1].replace(')','')
if not axes_unit.startswith('alat'):
raise RuntimeError('unknown CELL_PARAMETERS unit %s'%axes_unit)
# end if
alat = float(axes_unit.split('=')[-1])
axes_text = ''
for idim in range(ndim):
axes[idim,:] = mm.readline().split()
# end for idim
axes *= alat
# end if variable_cell
all_axes[istruct,:,:] = axes
pos_idx = pos_starts[istruct]
mm.seek(pos_idx)
tag_line = mm.readline().decode()
unit_text= tag_line.split()[-1]
au2unit = 1. # !!!! assume bohr
if 'angstrom' in unit_text:
au2unit = 1./bohr
elif 'bohr' in unit_text:
au2unit = 1.
elif 'alat' in unit_text:
au2unit = alat
elif 'crystal' in unit_text:
crystal_pos = True
else:
raise NotImplementedError('what unit is this? %s' % unit_text)
# end if
for iatom in range(natom):
line = mm.readline()
name = line.split()[0]
pos_text = line.strip(name)
try:
name,xpos,ypos,zpos = struct.unpack('4sx14sx14sx13s',pos_text)
pos = np.array([xpos,ypos,zpos],dtype=float) * au2unit
if crystal_pos:
pos = np.dot(pos,axes)
all_pos[istruct,iatom,:] = pos
except:
msg = 'failed to read (istruct, iatom)=(%d, %d)' %\
(istruct,iatom)
print(msg)
# end try
# end for iatom
# end for istruct
fhandle.close()
return all_axes,all_pos
# end def available_structures
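# Illustrative sketch (not part of the original module): grab the last structure
# written to a fixed-cell relax/md output.  The filename is hypothetical.
def example_last_structure(pw_out='relax.out'):
  all_axes, all_pos = available_structures(pw_out)
  return all_axes[-1], all_pos[-1]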
def md_traces(md_out,nstep=2000):
""" extract scalar traces from pwscf md output md_out
look for tags defined in line_tag_map """
fhandle = open(md_out,'r+')
mm = mmap(fhandle.fileno(),0)
line_tag_map = { # unique identifier of the line that contains each key
'fermi energy':'the Fermi energy is',
'total energy':'!',
'kinetic energy':'kinetic energy',
'temperature':'temperature',
'econst':'Ekin + Etot'
}
val_idx_map = {} # assume -2
val_type_map = {} # assume float
mm.seek(0)
data = []
for istep in range(nstep):
if mm.tell() >= mm.size():
break
# end if
found_stuff = False
entry = {'istep':istep}
for label in line_tag_map.keys():
# locate line with value for label
idx = mm.find(line_tag_map[label].encode())
if idx == -1:
continue
# end if
found_stuff = True
mm.seek(idx)
line = mm.readline()
# locate value in line
rval_idx = -2 # assume pattern "label = value unit"
if label in val_idx_map.keys():
rval_idx = val_idx_map[label]
# end if
rval = line.split()[rval_idx]
# convert value
val_type = float
if label in val_type_map.keys():
val_type = val_type_map[label]
# end if
value = val_type(rval)
entry[label] = value # !!!! assume float value
# end for
if found_stuff:
data.append(entry)
else:
break
# end if
# end for istep
if istep >= nstep-1:
msg = "WARNING: %d/%d structures found," % (istep, nstep)
msg += " nstep may need to be increased"
print(msg)
# end if
fhandle.close()
return data
# end def md_traces
def pos_in_box(pos,axes):
""" return atomic positions 'pos' in simulation box specified by 'axes' """
# convert positions to fractional coordinates
inv_axes = np.linalg.inv(axes)
upos = np.dot(pos,inv_axes)
upos -= np.floor(upos)
# convert back
newpos = np.dot(upos,axes)
return newpos
# end def
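# Minimal numeric sketch (not part of the original module): wrap a position that
# sits outside a 4x4x4 bohr cubic box back into the box.
def example_pos_in_box():
  axes = 4.*np.eye(3)
  pos = np.array([[5., -1., 2.]])
  return pos_in_box(pos, axes)  # expected [[1., 3., 2.]]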
def input_structure(scf_in,put_in_box=True):
ndim = 3 # assume 3 dimensions
with open(scf_in,'r+') as f:
mm = mmap(f.fileno(),0)
# end with
from qharv.reel.ascii_out import name_sep_val
ntyp = name_sep_val(mm, 'ntyp', dtype=int)
if ntyp != 1:
raise NotImplementedError('only support 1 type of atom for now')
# end if
# read lattice
mm.seek(0)
idx = mm.find(b'ibrav')
mm.seek(idx)
ibrav_line = mm.readline().decode()
ibrav = int(ibrav_line.split('=')[-1])
if ibrav != 0:
raise NotImplementedError('only ibrav = 0 is supported')
# end if
idx = mm.find(b'CELL_PARAMETERS')
mm.seek(idx)
header = mm.readline()
unit = header.split()[-1]
axes = np.zeros([ndim,ndim])
for idim in range(ndim):
line = mm.readline()
axes[idim,:] = np.array(line.split(), dtype=float)
# end for
cell = {'unit':unit,'axes':axes}
# read atomic positions
mm.seek(0) # rewind
idx = mm.find(b'nat')
mm.seek(idx)
nat_line = mm.readline().decode()
nat = int(nat_line.split('=')[-1])
idx = mm.find(b'ATOMIC_POSITIONS')
mm.seek(idx)
header = mm.readline()
unit = header.split()[-1]
pos = np.zeros([nat,ndim])
for iat in range(nat):
line = mm.readline()
pos[iat,:] = np.array(line.split()[-3:], dtype=float)
# end for iat
# sanity check: the next line should not look like yet another atomic position
line = mm.readline()
tokens = line.split()[-3:]
try:
  extra_pos = len(tokens) == 3 and bool([float(x) for x in tokens])
except ValueError:
  extra_pos = False # expect to see an empty (or non-numeric) line here
if extra_pos:
  raise RuntimeError('next line looks like positions too!\n%s' % line)
# end try
if put_in_box:
atpos = {'pos_unit':unit,'pos':pos_in_box(np.array(pos),np.array(axes)).tolist()}
else:
atpos = {'pos_unit':unit,'pos':pos}
# end if
entry = {'infile':scf_in}
entry.update(cell)
entry.update(atpos)
return entry
# end def input_structure
def read_stress(pw_out,stress_tag = 'total stress (Ry/bohr**3)',nstruct_max=4096):
""" read all stress tensors from a quantum espresso output
Args:
pw_out (str): output filename
stress_tag (str): tag at the beginning of each text block containing the stress tensor
nstruct_max (int): maximum number of blocks to look for
Returns:
(list[np.array],list[np.array]): (au_mat_list,kbar_mat_list), lists of stress tensors read
"""
with open(pw_out,'r+') as f:
mm = mmap(f.fileno(),0)
# end with
au_mat_list = []
kbar_mat_list = []
stress_starts = all_lines_with_tag(mm,stress_tag,nstruct_max)
for idx in stress_starts:
mm.seek(idx)
header = mm.readline().decode()
tokens = header.split()
# make sure we are about to read the correct block of text
assert tokens[2].strip('()') == 'Ry/bohr**3'
assert tokens[3].strip('()') == 'kbar'
idx = header.find('P=')
press = float(header[idx:].strip('P=')) # average pressure in kbar, used for checking only
au_mat = [] # pressure in Ry/bohr**3
kbar_mat = [] # pressure in kbar
for idim in range(3): # assume 3 dimensions
line = mm.readline()
tokens = line.split()
assert len(tokens) == 6
au_mat.append(tokens[:3])
kbar_mat.append(tokens[3:])
# end for idim
kbar_mat = np.array(kbar_mat,dtype=float)
assert np.isclose(np.diagonal(kbar_mat).mean(),press)
kbar_mat_list.append(kbar_mat)
au_mat_list.append(np.array(au_mat,dtype=float))
# end for idx
return au_mat_list,kbar_mat_list
# end def read_stress
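# Illustrative sketch (not part of the original module): average pressures in GPa
# from the kbar stress tensors (1 kbar = 0.1 GPa).  The filename is hypothetical.
def example_pressures_gpa(pw_out='scf.out'):
  au_mats, kbar_mats = read_stress(pw_out)
  return [0.1*np.diagonal(kmat).mean() for kmat in kbar_mats]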
def vc_relax_output(fout):
all_axes,all_pos = available_structures(fout,variable_cell=True)
amats,kmats = read_stress(fout)
data = []
for i in range(len(all_axes)):
axes = all_axes[i]
pos = all_pos[i]
entry = {'istep':i,'axes':axes,'pos':pos,
'amat':amats[i],'kmat':kmats[i]}
data.append(entry)
# end for i
return data
# end def vc_relax_output
def relax_forces(fout,nstruct_max=4096):
""" read all force blocks from a relax output (may also work on md output)
Args:
fout (str): quantum espresso output, expected calculation='relax'
nstruct_max (int): maximum number of force blocks to be read
Return:
np.array: shape (nstep,natom,ndim), forces on atoms at each optimization step
"""
nheader_before_forces = 2
""" e.g. Forces acting on atoms (Ry/au): # header line 1
# header line 2
atom 1 type 1 force = -0.00000000 -0.00012993 -0.00008628
"""
# get a memory map of the file
fhandle = open(fout,'r+')
mm = mmap(fhandle.fileno(),0)
# decide on array size
ndim = 3 # !!!! assume 3 dimensions
natom = name_sep_val(mm,'number of atoms',dtype=int)
idx_list = all_lines_with_tag(mm,'Forces acting on atoms (Ry/au)',nstruct_max)
nstep = len(idx_list)
forces = np.zeros([nstep,natom,ndim])
# go through each force block
for istep in range(nstep):
mm.seek( idx_list[istep] )
for iheader in range(nheader_before_forces):
mm.readline() # skip headers
for iatom in range(natom):
line = mm.readline()
tokens = line.split()
if len(tokens) != 9:
raise RuntimeError('invalid force block %s' % line)
# end if
forces[istep,iatom,:] = np.array(tokens[-3:], dtype=float)
# end for iatom
# end for istep
# check that all forces have been read
line = mm.readline()
if line.startswith(b'atom'):
raise RuntimeError('extra force line %s before memory idx %d'%(line,mm.tell()))
# end if
return forces
# end def relax_forces
def relax_output(fout):
all_axes,all_pos = available_structures(fout,variable_cell=False)
forces = relax_forces(fout)
data = []
assert len(forces) == len(all_axes)
for i in range(len(all_axes)):
axes = all_axes[i]
pos = all_pos[i]
entry = {'istep':i,'axes':axes,'pos':pos,'forces':forces[i]}
data.append(entry)
# end for i
return data
# end def relax_output
def get_axsf_normal_mode(faxsf,imode):
""" extract the first normal mode labeled by 'PRIMCOORD {imode:d}'
assume the following format:
PRIMCOORD 1
16 1
H 0.00000 0.00000 1.50303 -0.00000 0.00000 0.02501
H 0.63506 0.63506 0.00000 0.00000 -0.00000 0.02500
...
Args:
faxsf (str): name of axsf file
imode (int): index of normal mode
Return:
tuple: (elem,data), elem is a list of atomic symbols,
data is a np.array of floats (6 columns in above example).
"""
from qharv.reel import ascii_out
mm = ascii_out.read(faxsf)
# search through all modes for requested imode
all_idx = ascii_out.all_lines_with_tag(mm,'PRIMCOORD')
found = False
for idx in all_idx:
mm.seek(idx)
line = mm.readline()
myi = int(line.split()[1])
if myi != imode: continue
# found imode
found = True
# get number of atoms
line = mm.readline()
natom = int(line.split()[0])
# get atomic symbols, positions and normal mode
elem = []
data = []
for iatom in range(natom):
line = mm.readline()
tokens = line.split()
elem.append(tokens[0])
data.append(map(float,tokens[1:]))
# end for iatom
# check that the next line is either next mode or empty
line = mm.readline()
expected = (line == '') or (line.startswith('PRIMCOORD'))
if not expected:
raise RuntimeError('failed to read mode %d correctly'%imode)
# end if
break
# end for idx
if not found:
raise RuntimeError('failed to find mode %d in %s'%(imode,faxsf))
# end if
return elem,np.array(data)
# end def get_axsf_normal_mode
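# Illustrative sketch (not part of the original module): split an axsf mode block
# into positions and displacements, following the 6-column layout shown in the
# docstring above.  The filename is hypothetical.
def example_axsf_mode(faxsf='modes.axsf', imode=1):
  elem, data = get_axsf_normal_mode(faxsf, imode)
  pos = data[:, :3]   # equilibrium positions
  disp = data[:, 3:]  # normal-mode displacement vectors
  return elem, pos, disp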
def parse_output(floc):
""" get energy, volume and pressure from QE output """
etot = read_first_energy(floc)
entry = {'energy':etot/2.} # Ry to ha
mm = read(floc)
label_map = {
'volume':'unit-cell volume',
'natom':'number of atoms/cell'
}
for key in label_map.keys():
val = name_sep_val(mm, label_map[key])
entry[key] = val
# end for
au_stressl,kbar_stressl = read_stress(floc)
assert len(au_stressl) == 1
au_stress = au_stressl[0]
entry['pressure'] = np.diag(au_stress).mean()/2. # Ry to ha
entry['stress'] = au_stress/2. # Ry to ha
return entry
# end def parse_output
def parse_bands_out(bout, max_evline=1024):
fp = open(bout, 'r')
header = fp.readline()
nbnd, nks = [int(keyval.split('=')[1].strip('\n').strip('/'))
for keyval in header.split(',')]
kvecs = []
etable = []
for iks in range(nks):
kline = fp.readline()
kvecs.append( list(map(float, kline.split())) )
evl = []
mynbnd = 0
for i in range(max_evline):
bline = fp.readline()
nums = list(map(float, bline.split()))
evl.append( nums )
mynbnd += len(nums)
if mynbnd >= nbnd: break
# end for
eva = [a for b in evl for a in b]
if not len(eva) == nbnd:
raise RuntimeError('increase max_evline')
etable.append(eva)
# end for
if len(fp.readline()) != 0:
raise RuntimeError('wrong nbnd')
fp.close()
return np.array(kvecs), np.array(etable)
# end def parse_bands_out
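# Illustrative sketch (not part of the original module): width of the lowest band
# across all k points in a bands.x output.  The filename is hypothetical.
def example_lowest_bandwidth(bout='bands.out'):
  kvecs, etable = parse_bands_out(bout)
  return etable[:, 0].max() - etable[:, 0].min()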
def parse_nscf_bands(nscf_out, span=7, trailer='occupation numbers'):
data = {} # build a dictionary as return value
def scanf_7f(line, n):
""" implement scanf("%7.*f") """
numl = []
for i in range(n):
token = line[span*i:span*(i+1)]
num = float(token)
numl.append(num)
return numl
def parse_float_body(body):
""" parse a blob of floats """
lines = body.split('\n')
numl = []
for line in lines:
if len(line) == 0: continue
numl += map(float, line.split())
return numl
from qharv.reel import ascii_out
ndim = 3
mm = ascii_out.read(nscf_out)
alat = ascii_out.name_sep_val(mm, 'lattice parameter (alat)')
blat = 2*np.pi/alat
# find the beginnings of each band
bhead = ' k ='
idxl = ascii_out.all_lines_with_tag(mm, bhead)
nkpt = len(idxl)
data['nkpt'] = nkpt
# estimate the end of the last band
idx1 = ascii_out.all_lines_with_tag(mm, trailer)[-1]
# trick to use no if statement in the loop
idxl = idxl + [idx1]
kvecs = [] # (nkpt, ndim)
mat = [] # (nkpt, nbnd)
for ikpt in range(nkpt):
# specify beginning and end of the band output
idx0 = idxl[ikpt]
idx1 = idxl[ikpt+1]
# parse band output
# first read header
mm.seek(idx0)
header = mm.readline().decode()
if not 'bands (ev)' in header: continue
kxkykz = ascii_out.lr_mark(header, '=', '(')
kvec = scanf_7f(kxkykz, ndim)
kvecs.append(kvec)
# then read body
body = mm[mm.tell():idx1].decode().strip('\n')
if trailer in body:
idx2 = mm.find(trailer.encode(), mm.tell())
body = mm[mm.tell():idx2].decode().strip('\n')
row = parse_float_body(body)
mat.append(row)
# end for ikpt
data['kvecs'] = blat*np.array(kvecs)
data['bands'] = np.array(mat)
return data
def parse_kline(line, ik=None):
from qharv.reel import ascii_out
assert 'k(' in line
ikt, kvect, wkt = line.split('=')
myik = int(ascii_out.lr_mark(ikt, '(', ')'))
if ik is not None: # check k index
assert ik == myik-1 # fortran 1-based indexing
wk = float(wkt)
klist = ascii_out.lr_mark(kvect, '(', ')').split()
kvec = np.array(klist, dtype=float)
return kvec, wk
def read_kpoints(scf_out):
from qharv.reel import ascii_out
mm = ascii_out.read(scf_out)
# get lattice units
alat = ascii_out.name_sep_val(mm, 'lattice parameter (alat)')
blat = 2*np.pi/alat
# start parsing k points
idx = mm.find(b'number of k points')
mm.seek(idx)
# read first line
# e.g. number of k points= 32 Fermi-Dirac smearing ...
line = mm.readline().decode()
nk = int(line.split('=')[1].split()[0])
# confirm units in second line
line = mm.readline().decode()
assert '2pi/alat' in line
# start parsing kvectors
data = np.zeros([nk, 4]) # kx, ky, kz, wk
for ik in range(nk):
line = mm.readline().decode()
kvec, wk = parse_kline(line, ik=ik)
data[ik, :3] = kvec*blat
data[ik, 3] = wk
return data
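# Illustrative sketch (not part of the original module): columns 0-2 hold cartesian
# k-vectors in 1/bohr (assuming alat is in bohr) and column 3 holds the weight.
def example_total_kweight(scf_out='scf.out'):
  kdata = read_kpoints(scf_out)
  return kdata[:, 3].sum()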
def read_kfracs(scf_out):
from qharv.reel import ascii_out
mm = ascii_out.read(scf_out)
# get number of kpoints
idx = mm.find(b'number of k points')
mm.seek(idx)
line = mm.readline().decode()
nk = int(line.split('=')[1].split()[0])
# find first line
idx = mm.find(b'cryst. coord.')
mm.seek(idx)
mm.readline()
# read kpoints and weights
data = np.zeros([nk, 4])
for ik in range(nk):
line = mm.readline().decode()
kvec, wk = parse_kline(line)
data[ik, :3] = kvec
data[ik, 3] = wk
return data
def parse_scf_conv(scf_out):
from qharv.reel import ascii_out
mm = ascii_out.read(scf_out)
idxl = ascii_out.all_lines_with_tag(mm, 'iteration #')
data = []
for idx in idxl:
mm.seek(idx)
# read iteration number
iternow = ascii_out.name_sep_val(mm, 'iteration', sep='#', dtype=int)
# find total energy and other info (!!!! must be in order)
try:
time = ascii_out.name_sep_val(mm, 'cpu time spent up to now', sep='is')
enow = ascii_out.name_sep_val(mm, 'total energy')
except:
continue
entry = {'istep':iternow, 'energy':enow, 'time':time}
data.append(entry)
return data
def get_efermi(fout):
from qharv.reel import ascii_out
mm = ascii_out.read(fout)
efermi = ascii_out.name_sep_val(mm, 'the Fermi energy', sep='is')
return efermi
def get_gc_occ(mat, efermi):
""" get grand canonical occupation vector
example:
data = qer.parse_nscf_bands(scf_out)
kvecs = data['kvecs']
bands = np.array(data['bands'])
mm = ascii_out.read(scf_out)
efermi = ascii_out.name_sep_val(mm, 'the Fermi energy', sep='is')
norbs = get_gc_occ(bands, efermi)
Args:
mat (np.array): Kohn-Sham eigenvalues (nkpt, nband)
efermi (float): Fermi energy
Return:
np.array: number of occupied orbitals at each kpoint
"""
norbl = []
nkpt, nbnd = mat.shape
for ikpt in range(nkpt):
row = mat[ikpt]
sel = row<=efermi
norb = len(row[sel])
norbl.append(norb)
# end for
norbs = np.array(norbl)
return norbs
def get_occ_df(kvecs, norbs):
""" save grand canonical occupation vector with twists
Args:
kvecs (np.array): twist vectors, user-defined units
norbs (np.array): a list of integers
"""
import pandas as pd
cols = ('kmag', 'norb', 'kx', 'ky', 'kz')
kmags = np.linalg.norm(kvecs, axis=1)
data = np.zeros([len(norbs), len(cols)])
data[:, 0] = kmags
data[:, 1] = norbs
data[:, 2:] = kvecs
mydf = pd.DataFrame(data, columns=cols)
mydf['norb'] = mydf['norb'].astype(int)
mydf['group'] = mydf.index
return mydf
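# Illustrative sketch (not part of the original module): tabulate grand-canonical
# occupations per twist using the parsers defined above.  The filename is
# hypothetical.
def example_occupation_table(nscf_out='nscf.out'):
  data = parse_nscf_bands(nscf_out)
  norbs = get_gc_occ(np.array(data['bands']), get_efermi(nscf_out))
  return get_occ_df(data['kvecs'], norbs)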
def read_cell(scf_in, ndim=3):
with open(scf_in,'r+') as f:
mm = mmap(f.fileno(), 0)
idx = mm.find(b'CELL_PARAMETERS')
mm.seek(idx)
header = mm.readline()
unit = header.split()[-1]
mat = np.zeros([ndim, ndim])
for idim in range(ndim):
line = mm.readline()
vec = np.array(line.split(), dtype=float)
mat[idim, :] = vec
data = {
'unit': str(unit),
'axes': mat
}
return data
def read_out_cell(scf_out, ndim=3):
axes = np.zeros([ndim, ndim])
from qharv.reel import ascii_out
mm = ascii_out.read(scf_out)
idx = mm.find(b'crystal axes')
mm.seek(idx)
mm.readline()
for idim in range(ndim):
line = mm.readline().decode()
right = line.split('=')[-1]
text = ascii_out.lr_mark(right, '(', ')')
axes[idim, :] = np.array(text.split(), dtype=float)
return axes
def get_occupation_numbers(nscf_out, nmax=1024):
from qharv.reel import ascii_out
mm = ascii_out.read(nscf_out)
idx = ascii_out.all_lines_with_tag(mm, 'occupation numbers')
occl = []
for i in idx:
mm.seek(i)
mm.readline()
occ = []
for j in range(nmax):
line = mm.readline()
tokens = line.split()
if len(tokens) == 0:
break
occ += map(float, tokens)
next_line = mm.readline()
occl.append(occ)
return np.array(occl)
def read_sym_ops(scf_out, ndim=3):
""" read symmetry operators
Args:
scf_out (str): QE output file
ndim (int, optional): number of spatial dimensions, default is 3
Return:
list: all symmetry operators, each is represented as a dictionary
isym is index, name is description, vec is shift, mat is rotation
"""
from qharv.reel import ascii_out
mm = ascii_out.read(scf_out)
# find starting location of symmetry operator output
idx = mm.find(b'Sym. Ops.')
if idx == -1:
msg = 'no symmetry operations printed in %s. Is verbosity high?' % scf_out
raise RuntimeError(msg)
# rewind to beginning of line
idx0 = mm.rfind(b'\n', 0, idx)
mm.seek(idx0+1)
header = mm.readline().decode()
nsym = int(header.split()[0])
# check the number of symmetry outputs
idxl = ascii_out.all_lines_with_tag(mm, 'isym = ')
if len(idxl) != nsym:
raise RuntimeError('found %d symm. expected %d' % (len(idxl), nsym))
# parse symmetry operators
symops = []
for idx in idxl:
mm.seek(idx)
# read symmetry index and name: isym, name
line0 = mm.readline().decode()
text0 = line0.split('=')[1]
tokens0 = text0.split()
isym = int(tokens0[0])
name = ' '.join(tokens0[1:])
# read translation vector: vec
vec = [0]*ndim
if 'cart. axis' in name:
vect = ascii_out.lr_mark(line0, '[', ']')
vec[:] = list(map(float, vect.split(',')))
# read rotation matrix: mat
mat = []
idx = mm.find(b'cryst.')
mm.readline() # skip empty line
for idim in range(ndim):
line = mm.readline().decode()
if 'cryst.' in line:
line = line.split('=')[1]
text = ascii_out.lr_mark(line, '(', ')')
mat.append(list(map(float, text.split())))
entry = {
'isym': isym,
'name': name,
'vec': vec,
'mat': mat
}
symops.append(entry)
mm.close()
return symops
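# Illustrative sketch (not part of the original module): apply one parsed symmetry
# operator to a fractional coordinate.  This assumes the printed 'cryst.' matrix
# acts on crystal coordinates as x' = S.x + t; check the convention for your run.
def example_apply_symop(symop, frac_pos):
  smat = np.array(symop['mat'], dtype=float)
  tvec = np.array(symop['vec'], dtype=float)
  return np.dot(smat, frac_pos) + tvec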
def get_weights(nscf_out, remove_copy=False, atol=1e-10):
from qharv.reel import ascii_out
mm = ascii_out.read(nscf_out)
idx = ascii_out.all_lines_with_tag(mm, 'wk =')
lines = ascii_out.all_lines_at_idx(mm, idx)
weights = []
for line in lines:
wt = float(line.strip('\n').split('wk =')[-1])
weights.append(wt)
mm.close()
nt = len(weights)
if remove_copy:
weights = weights[:nt//2]
wtot = sum(weights)
if not np.isclose(wtot, 2.0, atol=atol):
raise RuntimeError('wrong weight sum %3.2f; expected 2.0' % wtot)
return np.array(weights)
def get_gc_occ(bands, efermi):
norbl = []
nkpt, nbnd = bands.shape
for ikpt in range(nkpt):
row = bands[ikpt]
sel = row<=efermi
norb = len(row[sel])
norbl.append(norb)
norbs = np.array(norbl)
return norbs
def get_tgrid_tshift(nscf_in):
from qharv.reel import ascii_out
mm = ascii_out.read(nscf_in)
idx = mm.find(b'K_POINTS automatic')
mm.seek(idx)
mm.readline()
kline = mm.readline()
mm.close()
nums = list(map(int, kline.split()))
tgrid = np.array(nums[:3])
tshift = np.array(nums[3:])
return tgrid, tshift
def get_axes(nscf_in, ndim=3):
from qharv.reel import ascii_out
mm = ascii_out.read(nscf_in)
idx = mm.find(b'CELL_PARAMETERS')
mm.seek(idx)
mm.readline()
cell = []
for idim in range(ndim):
line = mm.readline().decode()
nums = list(map(float, line.split()))
cell.append(nums)
mm.close()
axes = np.array(cell)
return axes
def get_tgrid_raxes(nscf_in, ndim=3):
from qharv.inspect import axes_pos
tgrid, tshift = get_tgrid_tshift(nscf_in)
axes = get_axes(nscf_in, ndim=ndim)
raxes = axes_pos.raxes(axes)
return tgrid, raxes
def get_elem_pos(nscf_in):
from qharv.reel import ascii_out
mm = ascii_out.read(nscf_in)
natom = ascii_out.name_sep_val(mm, 'nat', '=', dtype=int)
idx = mm.find(b'ATOMIC_POSITIONS')
mm.seek(idx)
header = mm.readline().decode()
eleml = []
posl = []
for iatom in range(natom):
    line = mm.readline().decode()
tokens = line.split()
eleml.append(tokens[0])
posl.append(tokens[1:])
mm.close()
elem = np.array(eleml, dtype=str)
pos = np.array(posl, dtype=float)
return elem, pos, header
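# --- Added example (not part of the original module) ---
# A sketch of how these readers might be combined; 'nscf.in' and 'nscf.out'
# are placeholder names for a QE input/output pair.
#
#   elem, pos, header = get_elem_pos('nscf.in')
#   tgrid, raxes = get_tgrid_raxes('nscf.in')
#   occl = get_occupation_numbers('nscf.out')
#   wts = get_weights('nscf.out')
#   norbs = get_gc_occ(bands, efermi)  # bands: (nkpt, nbnd) array of eigenvalues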
| 28.944889
| 154
| 0.604336
|
4a0619a07b5229fe16c99b58078c5eed1379b83d
| 3,038
|
py
|
Python
|
server/djangoapp/models.py
|
jalsop24/agfzb-CloudAppDevelopment_Capstone
|
494670b518f7dfe397700afd7f251b70287ee053
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/models.py
|
jalsop24/agfzb-CloudAppDevelopment_Capstone
|
494670b518f7dfe397700afd7f251b70287ee053
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/models.py
|
jalsop24/agfzb-CloudAppDevelopment_Capstone
|
494670b518f7dfe397700afd7f251b70287ee053
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.utils.timezone import now
# Create your models here.
# <HINT> Create a Car Make model `class CarMake(models.Model)`:
# - Name
# - Description
# - Any other fields you would like to include in car make model
# - __str__ method to print a car make object
class CarMake(models.Model):
Name = models.TextField()
Description = models.TextField()
def __str__(self) -> str:
return f"{self.Name}"
def __repr__(self) -> str:
return f"CarMake(Name={self.Name}, Description={self.Description})"
# <HINT> Create a Car Model model `class CarModel(models.Model):`:
# - Many-To-One relationship to Car Make model (One Car Make has many Car Models, using ForeignKey field)
# - Name
# - Dealer id, used to refer a dealer created in cloudant database
# - Type (CharField with a choices argument to provide limited choices such as Sedan, SUV, WAGON, etc.)
# - Year (DateField)
# - Any other fields you would like to include in car model
# - __str__ method to print a car make object
class CarModel(models.Model):
Name = models.TextField()
Make = models.ForeignKey(CarMake, on_delete=models.DO_NOTHING)
DealerId = models.IntegerField()
Type = models.CharField(choices=[
("Sedan", "Sedan"),
("SUV", "SUV"),
("Wagon", "Wagon"),
("Hatchback", "Hatchback"),
],
max_length=50)
Year = models.DateField()
    def __str__(self) -> str:
        return f"{self.Name}"
    def __repr__(self) -> str:
        return f"CarModel(Make={self.Make}, DealerId={self.DealerId}, Type={self.Type}, Year={self.Year})"
# <HINT> Create a plain Python class `CarDealer` to hold dealer data
class CarDealer:
def __init__(self, id, full_name, short_name, address, city, st, zip, lat, long):
# Dealer id
self.id = id
# Dealer Full Name
self.full_name = full_name
# Dealer short name
self.short_name = short_name
# Dealer address
self.address = address
# Dealer city
self.city = city
# Dealer state
self.st = st
# Dealer zip
self.zip = zip
# Location lat
self.lat = lat
# Location long
self.long = long
def __str__(self):
return f"Dealer name: {self.full_name!r}"
# <HINT> Create a plain Python class `DealerReview` to hold review data
class DealerReview:
def __init__(self, id, name, text, dealer_id, car_make, car_model, car_year, did_purchase, purchase_date, sentiment=None):
# Review id
self.id = id
# Customer name
self.name = name
# Review text / message
self.text = text
# Dealership
self.dealer_id = dealer_id
self.car_make = car_make
self.car_model = car_model
self.car_year = car_year
self.did_purchase = did_purchase
self.purchase_date = purchase_date
self.sentiment = sentiment
def __str__(self):
return f"'{self.text}' - {self.name}"
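# --- Added example (not part of the original file) ---
# How these classes might be used from a Django shell or a view; illustration
# only, assuming migrations for CarMake/CarModel exist and
# `from datetime import date` has been done.
#
#   make = CarMake.objects.create(Name="Audi", Description="German car maker")
#   CarModel.objects.create(Name="A4", Make=make, DealerId=1,
#                           Type="Sedan", Year=date(2021, 1, 1))
#   dealer = CarDealer(id=1, full_name="Best Cars Inc.", short_name="Best",
#                      address="1 Main St", city="Springfield", st="IL",
#                      zip="62701", lat=39.78, long=-89.65)
#   print(dealer)  # -> Dealer name: 'Best Cars Inc.'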
| 29.211538
| 126
| 0.625741
|
4a061a659a16348cbc7b00e61595deccbac0a3a8
| 189
|
py
|
Python
|
data_collection/gazette/spiders/sc_jupia.py
|
kaiocp/querido-diario
|
86004049c6eee305e13066cf3607d30849bb099a
|
[
"MIT"
] | 454
|
2018-04-07T03:32:57.000Z
|
2020-08-17T19:56:22.000Z
|
data_collection/gazette/spiders/sc_jupia.py
|
kaiocp/querido-diario
|
86004049c6eee305e13066cf3607d30849bb099a
|
[
"MIT"
] | 254
|
2020-08-18T14:09:43.000Z
|
2022-03-28T11:30:51.000Z
|
data_collection/gazette/spiders/sc_jupia.py
|
kaiocp/querido-diario
|
86004049c6eee305e13066cf3607d30849bb099a
|
[
"MIT"
] | 183
|
2018-04-11T15:09:37.000Z
|
2020-08-15T18:55:11.000Z
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScJupiaSpider(FecamGazetteSpider):
name = "sc_jupia"
FECAM_QUERY = "cod_entidade:143"
TERRITORY_ID = "4209177"
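# Added note (not part of the original spider): within the querido-diario
# Scrapy project this spider would typically be run from the data_collection
# directory with:
#
#   scrapy crawl sc_jupia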
| 23.625
| 57
| 0.761905
|
4a061b31351d9122b46f42c91c2fdde07f5a4de0
| 12,947
|
py
|
Python
|
src/ezdxf/entitydb.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | 1
|
2021-06-05T09:15:15.000Z
|
2021-06-05T09:15:15.000Z
|
src/ezdxf/entitydb.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | null | null | null |
src/ezdxf/entitydb.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019-2020, Manfred Moitzi
# License: MIT License
from typing import Optional, Iterable, Tuple, TYPE_CHECKING, Dict, Set
from contextlib import contextmanager
from ezdxf.tools.handle import HandleGenerator
from ezdxf.lldxf.types import is_valid_handle
from ezdxf.entities.dxfentity import DXFEntity
from ezdxf.audit import AuditError, Auditor
from ezdxf.lldxf.const import DXFInternalEzdxfError
from ezdxf.entities.subentity import LinkedEntities
from ezdxf.entities import factory
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter
DATABASE_EXCLUDE = {
'SECTION', 'ENDSEC', 'EOF', 'TABLE', 'ENDTAB', 'CLASS', 'ACDSRECORD',
'ACDSSCHEMA'
}
class EntityDB:
""" A simple key/entity database.
    Every entity/object, except tables and sections, is represented as
    DXFEntity or an inherited type. These entities are stored in the
    DXF document database; the database key is the `handle` as a string.
"""
class Trashcan:
""" Store handles to entities which should be deleted later. """
def __init__(self, db: 'EntityDB'):
self._database = db._database
self._handles: Set[str] = set()
def add(self, handle: str):
""" Put handle into trashcan to delete the entity later, this is
required for deleting entities while iterating the database.
"""
self._handles.add(handle)
def clear(self):
""" Remove handles in trashcan from database and destroy entities if
still alive.
"""
db = self._database
for handle in self._handles:
entity = db.get(handle)
if entity and entity.is_alive:
entity.destroy()
if handle in db:
del db[handle]
self._handles.clear()
def __init__(self):
self._database: Dict[str, DXFEntity] = {}
        # Generator for new unique DXF handles:
self.handles = HandleGenerator()
self.locked: bool = False # used only for debugging
def __getitem__(self, handle: str) -> DXFEntity:
""" Get entity by `handle`, does not filter destroyed entities nor
entities in the trashcan.
"""
return self._database[handle]
def __setitem__(self, handle: str, entity: DXFEntity) -> None:
""" Set `entity` for `handle`. """
assert isinstance(handle, str), type(handle)
assert isinstance(entity, DXFEntity), type(entity)
assert entity.is_alive, 'Can not store destroyed entity.'
if self.locked:
raise DXFInternalEzdxfError('Locked entity database.')
if handle == '0' or not is_valid_handle(handle):
raise ValueError(f'Invalid handle {handle}.')
self._database[handle] = entity
def __delitem__(self, handle: str) -> None:
""" Delete entity by `handle`. Removes entity only from database, does
not destroy the entity.
"""
if self.locked:
raise DXFInternalEzdxfError('Locked entity database.')
del self._database[handle]
def __contains__(self, handle: str) -> bool:
""" ``True`` if database contains `handle`. """
if handle is None:
return False
assert isinstance(handle, str), type(handle)
return handle in self._database
def __len__(self) -> int:
""" Count of database items. """
return len(self._database)
def __iter__(self) -> Iterable[str]:
""" Iterable of all handles, does filter destroyed entities but not
entities in the trashcan.
"""
return self.keys()
    def get(self, handle: str) -> Optional[DXFEntity]:
        """ Returns entity for `handle` or ``None`` if no entry exists, does
        not filter destroyed entities.
"""
return self._database.get(handle)
def next_handle(self) -> str:
""" Returns next unique handle."""
while True:
handle = self.handles.next()
if handle not in self._database:
return handle
def keys(self) -> Iterable[str]:
""" Iterable of all handles, does filter destroyed entities.
"""
return (handle for handle, entity in self.items())
def values(self) -> Iterable[DXFEntity]:
""" Iterable of all entities, does filter destroyed entities.
"""
return (entity for handle, entity in self.items())
def items(self) -> Iterable[Tuple[str, DXFEntity]]:
""" Iterable of all (handle, entities) pairs, does filter destroyed
entities.
"""
return (
(handle, entity) for handle, entity in self._database.items()
if entity.is_alive
)
def add(self, entity: DXFEntity) -> None:
""" Add `entity` to database, assigns a new handle to the `entity`
if :attr:`entity.dxf.handle` is ``None``. Adding the same entity
multiple times is possible and creates only a single database entry.
"""
if entity.dxftype() in DATABASE_EXCLUDE:
if entity.dxf.handle is not None:
# Mark existing entity handle as used to avoid
# reassigning the same handle again.
self[entity.dxf.handle] = entity
return
handle: str = entity.dxf.handle
if handle is None:
handle = self.next_handle()
entity.update_handle(handle)
self[handle] = entity
# Add sub entities ATTRIB, VERTEX and SEQEND to database.
if isinstance(entity, LinkedEntities):
entity.add_sub_entities_to_entitydb(self)
def delete_entity(self, entity: DXFEntity) -> None:
""" Remove `entity` from database and destroy the `entity`. """
if entity.is_alive:
del self[entity.dxf.handle]
entity.destroy()
def discard(self, entity: 'DXFEntity') -> None:
""" Discard entity from database without destroying the entity. """
if entity.is_alive:
if isinstance(entity, LinkedEntities):
entity.process_sub_entities(lambda e: self.discard(e))
handle = entity.dxf.handle
try:
del self._database[handle]
entity.dxf.handle = None
except KeyError:
pass
def duplicate_entity(self, entity: DXFEntity) -> DXFEntity:
""" Duplicates `entity` and its sub entities (VERTEX, ATTRIB, SEQEND)
and store them with new handles in the entity database.
Graphical entities have to be added to a layout by
:meth:`~ezdxf.layouts.BaseLayout.add_entity`.
To import DXF entities from another drawing use the
:class:`~ezdxf.addons.importer.Importer` add-on.
A new owner handle will be set by adding the duplicated entity to a
layout.
"""
new_entity: DXFEntity = entity.copy()
new_entity.dxf.handle = self.next_handle()
factory.bind(new_entity, entity.doc)
return new_entity
def audit(self, auditor: 'Auditor'):
""" Restore database integrity:
- restore database entries with modified handles (key != entity.dxf.handle)
- remove entities with invalid handles
- empty trashcan - destroy all entities in the trashcan
- removes destroyed database entries (purge)
"""
assert self.locked is False, 'Database is locked!'
add_entities = []
with self.trashcan() as trash:
for handle, entity in self.items():
# Destroyed entities are already filtered!
if not is_valid_handle(handle):
auditor.fixed_error(
code=AuditError.INVALID_ENTITY_HANDLE,
message=f'Removed entity {entity.dxftype()} with invalid '
f'handle "{handle}" from entity database.',
)
trash.add(handle)
if handle != entity.dxf.get('handle'):
# database handle != stored entity handle
# prevent entity from being destroyed:
self._database[handle] = None
trash.add(handle)
add_entities.append(entity)
# Remove all destroyed entities from database:
self.purge()
for entity in add_entities:
handle = entity.dxf.get('handle')
if handle is None:
auditor.fixed_error(
code=AuditError.INVALID_ENTITY_HANDLE,
message=f'Removed entity {entity.dxftype()} without handle '
f'from entity database.',
)
continue
if not is_valid_handle(handle) or handle == '0':
auditor.fixed_error(
code=AuditError.INVALID_ENTITY_HANDLE,
message=f'Removed entity {entity.dxftype()} with invalid '
f'handle "{handle}" from entity database.',
)
continue
self[handle] = entity
    def new_trashcan(self) -> 'EntityDB.Trashcan':
        """ Returns a new trashcan; empty the trashcan manually by calling
        :func:`Trashcan.clear`.
        """
return EntityDB.Trashcan(self)
@contextmanager
def trashcan(self) -> 'EntityDB.Trashcan':
""" Returns a new trashcan in context manager mode, trashcan will be
emptied when leaving context.
"""
trashcan_ = self.new_trashcan()
yield trashcan_
# try ... finally is not required, in case of an exception the database
# is maybe already in an unreliable state.
trashcan_.clear()
def purge(self) -> None:
""" Remove all destroyed entities from database, but does not empty the
trashcan.
"""
# Important: operate on underlying data structure:
db = self._database
dead_handles = [
handle for handle, entity in db.items()
if not entity.is_alive
]
for handle in dead_handles:
del db[handle]
def dxf_types_in_use(self) -> Set[str]:
return set(entity.dxftype() for entity in self.values())
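# --- Added example (not part of the original module) ---
# A rough sketch of the EntityDB workflow; `e` stands for any live DXFEntity,
# e.g. one taken from a document's modelspace.
#
#   db = EntityDB()
#   db.add(e)                        # assigns a fresh handle if e has none
#   assert e.dxf.handle in db
#   copy_ = db.duplicate_entity(e)   # copy stored under a new handle
#   db.delete_entity(e)              # removes the entry and destroys e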
class EntitySpace:
"""
An :class:`EntitySpace` is a collection of :class:`~ezdxf.entities.DXFEntity`
    objects, which stores only references to :class:`DXFEntity` objects.
The :class:`~ezdxf.layouts.Modelspace`, any :class:`~ezdxf.layouts.Paperspace`
layout and :class:`~ezdxf.layouts.BlockLayout` objects have an
:class:`EntitySpace` container to store their entities.
"""
def __init__(self, entities=None):
entities = entities or []
self.entities = list(e for e in entities if e.is_alive)
def __iter__(self) -> Iterable['DXFEntity']:
""" Iterable of all entities, filters destroyed entities. """
return (e for e in self.entities if e.is_alive)
    def __getitem__(self, index) -> 'DXFEntity':
        """ Get entity at `index`.
:class:`EntitySpace` has a standard Python list like interface,
therefore `index` can be any valid list indexing or slicing term, like
a single index ``layout[-1]`` to get the last entity, or an index slice
``layout[:10]`` to get the first 10 or less entities as
``List[DXFEntity]``. Does not filter destroyed entities.
"""
return self.entities[index]
    def __len__(self) -> int:
        """ Count of entities including destroyed entities. """
return len(self.entities)
def has_handle(self, handle: str) -> bool:
""" ``True`` if `handle` is present, does filter destroyed entities. """
assert isinstance(handle, str), type(handle)
return any(e.dxf.handle == handle for e in self)
def purge(self):
""" Remove all destroyed entities from entity space. """
self.entities = list(self)
def add(self, entity: 'DXFEntity') -> None:
""" Add `entity`. """
assert isinstance(entity, DXFEntity), type(entity)
assert entity.is_alive, 'Can not store destroyed entities'
self.entities.append(entity)
def extend(self, entities: Iterable['DXFEntity']) -> None:
""" Add multiple `entities`."""
for entity in entities:
self.add(entity)
def export_dxf(self, tagwriter: 'TagWriter') -> None:
""" Export all entities into DXF file by `tagwriter`.
(internal API)
"""
for entity in iter(self):
entity.export_dxf(tagwriter)
def remove(self, entity: 'DXFEntity') -> None:
""" Remove `entity`. """
self.entities.remove(entity)
def clear(self) -> None:
""" Remove all entities. """
# Do not destroy entities!
self.entities = list()
| 36.573446
| 83
| 0.599985
|
4a061b45912558814f2153090319c78d49b082d4
| 3,673
|
py
|
Python
|
nbgrader/apps/listapp.py
|
aliniknejad/nbgrader
|
124095e48a840ac2af6e3178eab7ed32089f3cd2
|
[
"BSD-3-Clause"
] | 1
|
2019-10-02T11:06:32.000Z
|
2019-10-02T11:06:32.000Z
|
nbgrader/apps/listapp.py
|
aliniknejad/nbgrader
|
124095e48a840ac2af6e3178eab7ed32089f3cd2
|
[
"BSD-3-Clause"
] | 4
|
2019-03-02T11:49:46.000Z
|
2020-09-07T10:17:52.000Z
|
nbgrader/apps/listapp.py
|
aliniknejad/nbgrader
|
124095e48a840ac2af6e3178eab7ed32089f3cd2
|
[
"BSD-3-Clause"
] | 2
|
2019-05-31T08:53:48.000Z
|
2019-05-31T09:42:26.000Z
|
# coding: utf-8
from traitlets import default
from .baseapp import NbGrader, nbgrader_aliases, nbgrader_flags
from ..exchange import Exchange, ExchangeList, ExchangeError
aliases = {}
aliases.update(nbgrader_aliases)
aliases.update({
"timezone": "Exchange.timezone",
"course": "CourseDirectory.course_id",
})
flags = {}
flags.update(nbgrader_flags)
flags.update({
'inbound': (
{'ExchangeList' : {'inbound': True}},
"List inbound files rather than outbound."
),
'cached': (
{'ExchangeList' : {'cached': True}},
"List cached files rather than inbound/outbound."
),
'remove': (
{'ExchangeList' : {'remove': True}},
"Remove an assignment from the exchange."
),
'json': (
{'ExchangeList' : {'as_json': True}},
"Print out assignments as json."
),
})
class ListApp(NbGrader):
name = u'nbgrader-list'
description = u'List assignments in the nbgrader exchange'
aliases = aliases
flags = flags
examples = """
List assignments in the nbgrader exchange. For the usage of instructors
and students.
Students
========
To list assignments for a course, you must first know the `course_id` for
your course. If you don't know it, ask your instructor.
To list the released assignments for the course `phys101`:
nbgrader list phys101
Instructors
===========
To list outbound (released) or inbound (submitted) assignments for a course,
you must configure the `course_id` in your config file or the command line.
To see all of the released assignments, run
nbgrader list # course_id in the config file
or
nbgrader list --course phys101 # course_id provided
To see the inbound (submitted) assignments:
nbgrader list --inbound
You can use the `--student` and `--assignment` options to filter the list
by student or assignment:
nbgrader list --inbound --student=student1 --assignment=assignment1
If a student has submitted an assignment multiple times, the `list` command
will show all submissions with their timestamps.
The `list` command can optionally remove listed assignments by providing the
`--remove` flag:
nbgrader list --inbound --remove --student=student1
"""
@default("classes")
def _classes_default(self):
classes = super(ListApp, self)._classes_default()
classes.extend([Exchange, ExchangeList])
return classes
def _load_config(self, cfg, **kwargs):
if 'ListApp' in cfg:
self.log.warning(
"Use ExchangeList in config, not ListApp. Outdated config:\n%s",
'\n'.join(
'ListApp.{key} = {value!r}'.format(key=key, value=value)
for key, value in cfg.ListApp.items()
)
)
cfg.ExchangeList.merge(cfg.ListApp)
del cfg.ListApp
super(ListApp, self)._load_config(cfg, **kwargs)
def start(self):
super(ListApp, self).start()
        # set assignment and course
if len(self.extra_args) == 1:
self.coursedir.assignment_id = self.extra_args[0]
elif len(self.extra_args) > 2:
self.fail("Too many arguments")
lister = ExchangeList(
coursedir=self.coursedir,
authenticator=self.authenticator,
parent=self)
try:
lister.start()
except ExchangeError:
self.fail("nbgrader list failed")
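# Added note (not part of the original module): this application backs the
# `nbgrader list` command line entry point; programmatically it could be
# launched the usual traitlets-application way, e.g.
#
#   ListApp.launch_instance(argv=["--course", "phys101", "--inbound"])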
| 28.695313
| 84
| 0.604138
|
4a061bf6978a578c27d3169cde197363cfdfc223
| 2,939
|
py
|
Python
|
100dayspython/day003/main.py
|
mrqssjeff/project-python
|
b3b08f2acfe825640a5ee92cf9d6fa45ab580384
|
[
"MIT"
] | null | null | null |
100dayspython/day003/main.py
|
mrqssjeff/project-python
|
b3b08f2acfe825640a5ee92cf9d6fa45ab580384
|
[
"MIT"
] | null | null | null |
100dayspython/day003/main.py
|
mrqssjeff/project-python
|
b3b08f2acfe825640a5ee92cf9d6fa45ab580384
|
[
"MIT"
] | null | null | null |
print('''
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⡠⠤⠒⠊⠉⠉⠓⠒⠒⠒⠢⢤⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⠔⠊⠁⠀⠀⠀⠀⠀⠀⠀⢀⠰⡄⠠⡀⠈⠓⢦⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⢀⡔⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠣⢣⠀⠹⡀⢣⠀⠱⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⣠⠋⠀⠀⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠠⠀⢱⠀⡇⢠⠁⡀⢱⡄⠘⢆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⣰⠃⠀⠀⡞⠆⢠⠀⠀⠀⢀⣠⣤⣴⣶⣾⣿⣿⣶⣷⣾⣦⣧⣸⡟⠀⠘⡆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⣸⠃⠀⠀⠀⠁⡀⠈⣦⣴⣾⣿⣿⣿⣿⣿⡿⠿⠟⠛⠛⠋⠉⠉⠉⢉⡍⠉⠉⠓⠒⠠⠤⠀⡤⠒⠤⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⢀⡇⠀⢠⠀⢹⣀⣽⣿⣿⣿⠿⠿⠛⠋⠉⠀⠀⠀⠀⠀⠀⠀⣀⣴⣾⠋⠀⠠⠩⠶⣄⣀⠀⢸⠁⠀⠀⠈⢢⡀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⢸⠁⠀⠘⡄⣰⣿⡿⠟⠋⠀⠰⠚⢁⣁⡶⠤⣤⣤⣶⣶⣾⣿⣿⣯⣤⣶⣦⣴⣴⣖⠢⠌⠁⢸⠀⠀⠀⠀⠀⡟⠢⡀⠀⠀⠀⠀⠀
⠀⠀⢸⠀⠀⠀⣸⠟⢉⡴⠞⠛⣰⣶⡾⠋⠀⠀⠀⠘⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡁⠈⠙⠀⠀⣘⡄⠀⠀⠀⠀⢻⠀⠹⡄⠀⠀⠀⠀
⠀⠀⠀⢧⡴⠊⠁⠀⢀⣤⣶⣿⣿⣿⠃⠀⠀⠀⠀⠀⠈⢿⣿⠙⣿⣿⣿⣿⣿⣿⣿⣿⣦⡀⢤⡶⡈⢻⡄⠀⠀⢀⣸⢀⢸⣿⠀⠀⠀⠀
⠀⢀⡴⠋⠀⣀⣴⣶⣿⣿⣿⣿⣿⡏⠀⠀⠀⠀⠀⠀⠀⠈⢿⡇⠈⢿⣿⣿⣿⣿⣿⣿⣿⡿⢷⣷⡜⡀⣟⠛⣖⠉⢹⠸⠟⣿⠀⠀⠀⠀
⡴⢋⠔⡶⣻⣿⣿⣿⣿⣿⣿⠃⣿⠁⠀⠀⠀⠀⠀⠀⠀⠀⠘⡇⣀⡤⠿⣧⠻⣿⣿⣿⣿⣷⣄⢠⡅⢃⢸⠀⢿⡦⠸⡀⢠⠃⠀⠀⠀⠀
⠔⢁⣤⣾⣿⠋⣿⣿⣿⣿⣿⠠⢽⢄⣀⠀⠀⠀⠀⠀⠀⠀⠴⠋⠁⣀⡀⣹⡀⢻⣿⣿⣿⣯⠙⠻⢇⡎⢸⠀⣿⣿⣆⣷⡃⠀⠀⠀⠀⠀
⠀⡼⡽⡵⠁⢸⣿⣿⣿⢻⡏⠀⠘⠀⠀⠉⠉⠀⠠⡄⠀⠀⠀⣴⡾⡛⢛⣽⠇⣀⣿⢹⣿⣳⣷⢀⣞⣠⠼⠒⠉⠁⠀⠈⠉⠲⢄⡀⠀⠀
⡾⠉⣠⡅⢰⣿⣿⣿⣿⠈⡇⢠⡴⠖⠻⠶⡆⠀⠀⠁⠀⠀⠀⠈⠐⠻⢍⣩⣖⡽⡍⢸⣶⣧⢘⡿⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⡦⠀
⡇⠖⡧⡇⢸⡟⡸⠛⣿⡀⠀⡈⠀⠀⠀⠀⠀⠀⠀⠙⠶⢀⣀⣠⠤⠒⠙⠁⠈⠀⡇⢸⣇⠜⡿⣿⠙⠆⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⡇⠀
⠙⡄⠇⠃⢎⠀⢻⠛⣿⡇⠀⣏⣹⠲⡶⠒⠒⠒⠒⠋⠉⠉⠀⠀⠀⠀⠀⠀⡠⢾⠇⢸⣡⣾⣧⠛⢀⣀⣠⠤⠒⠒⠉⠁⠀⠀⠀⠺⠀⠀
⠁⠿⣀⠺⠶⡁⠘⡄⢾⣿⡀⢸⡙⠀⠀⠀⠀⠀⢀⣀⡠⠤⠄⠒⠒⠒⠒⠾⠳⡟⢀⡟⠩⢻⠻⣿⣾⣧⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠐⣄
⣏⡒⠬⣙⠒⠚⣦⠘⠦⣙⢳⡀⢻⣦⣨⡦⠒⠉⠁⠀⠀⠀⠀⠀⠀⠀⠀⢀⠜⢡⡾⣇⣠⠂⢰⡇⡿⡏⡇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⣿
⠀⠉⠓⠒⠦⠤⠤⠤⠤⠬⢍⠳⣄⠙⢧⡁⠀⠀⠀⠀⠀⠀⠀⢀⣤⣤⠖⣡⣶⠟⠀⠀⠌⡈⠈⣷⣄⣁⣷⡠⠤⠤⠴⠀⠀⠀⠀⠀⠀⠙
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣷⣦⣍⣓⠒⠶⠦⠴⠟⠒⣋⣉⣴⣾⡟⠁⢀⠀⠀⠰⠀⣼⣿⣇⢸⡟⡆⠀⠀⠀⠀⢀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⣿⠁⠈⠉⠉⣛⣿⣿⣉⢿⡿⣻⡟⡀⠠⠋⠀⢀⠃⣼⣿⣿⣿⣷⢶⡧⣤⠤⠔⠛⠉⠀⠀⠀⢠⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⣿⣿⣿⣿⣄⠠⠤⣀⠙⠛⠟⢻⣿⣳⣟⡎⢠⠃⣀⣤⠞⣾⣿⣿⣿⣿⣿⣼⡗⠸⠀⠀⠀⠀⠀⢀⡴⠁⠐
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠹⣿⣿⡿⠻⣷⡄⠀⠈⠁⠒⠤⠄⠘⠿⠤⠗⠋⠁⠀⠈⠹⣿⣿⣿⣿⣿⣿⣿⣿⣖⢦⣤⣴⣾⣷⣦⣤⣶
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⣿⡁⠀⣸⣷⠀⠀⠀⠀⠀⠀⢸⠀⠀⠀⠀⠀⠀⢠⡄⢹⣿⣿⣿⣿⣿⣿⣿⣿⣾⣿⣿⣿⣿⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣾⣿⣿⣷⣄⡀⠀⠀⠀⢸⠀⠀⠀⠀⢀⡴⡟⠀⠀⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠿⠛⠛⠻
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⣿⣿⠿⢿⡇⠙⢭⣗⣤⣀⠈⢆⠀⠠⠔⠓⠒⠃⠀⠀⠈⠻⠿⠿⠿⠿⠿⠿⠿⠷⠒⠚⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢹⡧⠀⠘⠿⠄⠀⠙⠂⠀⠀⢈⢀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
''')
print("Welcome to Treasure Island.")
print("Your mission is to find the treasure.")
direction = str(input('You are at a cross road. Where do you want to go? Type "left" or "right": ')).strip().lower()
if direction == "left":
    lake = str(input('You come to a lake. There is an island in the middle of the lake. '
'Type "wait" to wait for a boat. Type "swim" to swim across: ')).strip().lower()
if lake == "wait":
island = str(input('You arrive at the island unharmed. There is a house with 3 doors. '
'One Red, One Yellow, one Blue. Which color do you want to choose?: ')).strip().lower()
        blue = 'You enter a room full of starving dogs. Game Over!'
        red = "You enter a room where you see Jason Voorhees putting his mask on. Game Over!"
yellow = 'You found the One Piece. Congratulations, you are the Pirate King!'
if island == "blue":
print(blue)
elif island == "red":
print(red)
elif island == "yellow":
print(yellow)
else:
print('You were caught and killed by the enemies. Game Over!')
elif lake == "swim":
swim = "You got eaten by a pack of piranhas. Game Over!"
print(swim)
else:
print('You got eaten by a bear. Game Over!')
elif direction == "right":
right = 'You got ran over by a truck. Game Over!'
print(right)
else:
cliff = 'You fell off a cliff. Game Over.'
print(cliff)
| 43.865672
| 116
| 0.303505
|
4a061d548a5345a7d1cd70081c8acfd1cb7c8bd2
| 6,802
|
py
|
Python
|
tests/test_accounts.py
|
kullo/server
|
0ad28a9bf50346e654fcbf709d55c74bb48c98c7
|
[
"BSD-3-Clause"
] | 1
|
2021-06-15T07:47:54.000Z
|
2021-06-15T07:47:54.000Z
|
tests/test_accounts.py
|
kullo/server
|
0ad28a9bf50346e654fcbf709d55c74bb48c98c7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_accounts.py
|
kullo/server
|
0ad28a9bf50346e654fcbf709d55c74bb48c98c7
|
[
"BSD-3-Clause"
] | null | null | null |
# vim: set expandtab shiftwidth=4 :
# pylint: disable=missing-docstring
import json
import requests
from . import base
from . import db
from . import settings
def make_body(user):
return {
'address': user['address'],
'loginKey': user['loginKey'],
'privateDataKey': user['privateDataKey'],
'keypairEncryption': {
'pubkey': user['encryptionPubkey'],
'privkey': user['encryptionPrivkey'],
},
'keypairSigning': {
'pubkey': user['signingPubkey'],
'privkey': user['signingPrivkey'],
},
'acceptedTerms': user['acceptedTerms'],
}
def register_account(body, languages=None):
headers = {'content-type': 'application/json'}
if languages is not None:
headers['Accept-Language'] = languages
return requests.post(
settings.SERVER + '/accounts',
headers=headers,
data=json.dumps(body))
def update_body_with_challenge(req_body, resp_body):
req_body['challenge'] = resp_body['challenge']
req_body['challengeAuth'] = resp_body['challengeAuth']
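# --- Added example (not part of the original tests) ---
# The challenge/response round trip exercised below, written out as plain
# code; `user` stands for one of the settings.*_USERS entries.
#
#   body = make_body(user)
#   first = register_account(body)                 # expect 403 + challenge
#   update_body_with_challenge(body, json.loads(first.text))
#   body['challengeAnswer'] = user['reservation']  # or user['reset_code']
#   final = register_account(body)                 # expect 200 on success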
class AccountsTest(base.BaseTest):
def tearDown(self):
with db.get_connection(settings.DB_CONNECTION_STRING) as conn:
with conn.cursor() as cursor:
                for user in settings.NONEXISTING_USERS.values():
db.delete_user(cursor, user)
                for user in settings.RESERVATION_USERS.values():
db.delete_user(cursor, user)
def send_initial_request(
self, user, expected_challenge_type,
expected_error_code=requests.codes.forbidden):
req_body = make_body(user)
resp = register_account(req_body)
self.assertEqual(resp.status_code, expected_error_code)
resp_body = json.loads(resp.text)
if expected_challenge_type is not None:
self.assertEqual(
resp_body['challenge']['type'],
expected_challenge_type)
return req_body, resp_body
def test_fail_on_existing_user(self):
# send initial request with existing user
self.send_initial_request(
settings.EXISTING_USERS[1],
None,
requests.codes.conflict)
def test_fail_on_inconsistent_user(self):
user = settings.RESERVATION_USERS[1]
# send initial request
req_body, resp_body = self.send_initial_request(user, 'reservation')
# reply with correct answer but modified address
update_body_with_challenge(req_body, resp_body)
req_body['challengeAnswer'] = user['reservation']
req_body['address'] = settings.RESERVATION_USERS[2]['address']
resp = register_account(req_body)
self.assertEqual(resp.status_code, requests.codes.forbidden)
def test_fail_on_modified_challenge(self):
user = settings.RESERVATION_USERS[1]
# send initial request
req_body, resp_body = self.send_initial_request(user, 'reservation')
# reply with correct answer but modified challenge
for field, value in (
('type', 'bad'),
('user', 'bad#kullo.test'),
('timestamp', 1234567890),
('text', 'bad')):
update_body_with_challenge(req_body, resp_body)
req_body['challengeAnswer'] = user['reservation']
req_body['challenge'][field] = value
resp = register_account(req_body)
self.assertEqual(resp.status_code, requests.codes.forbidden)
# reply with correct answer but modified challenge auth
update_body_with_challenge(req_body, resp_body)
req_body['challengeAnswer'] = user['reservation']
req_body['challengeAuth'] = 'bad'
resp = register_account(req_body)
self.assertEqual(resp.status_code, requests.codes.forbidden)
def test_reservation_fail_on_wrong_answer(self):
user = settings.RESERVATION_USERS[1]
# send initial request for reservation user
req_body, resp_body = self.send_initial_request(user, 'reservation')
# reply with wrong answer
update_body_with_challenge(req_body, resp_body)
req_body['challengeAnswer'] = 'bad'
resp = register_account(req_body)
self.assertEqual(resp.status_code, requests.codes.forbidden)
def test_reservation_success(self):
user = settings.RESERVATION_USERS[1]
# send initial request for reservation user
req_body, resp_body = self.send_initial_request(user, 'reservation')
# reply with correct answer
update_body_with_challenge(req_body, resp_body)
req_body['challengeAnswer'] = user['reservation']
resp = register_account(req_body, languages='de-DE')
self.assertEqual(resp.status_code, requests.codes.ok)
#TODO check user inbox
def test_fail_on_nonlocal_non_preregistered_address(self):
user = settings.NONLOCAL_USERS[1]
# send initial request for reservation user
req_body, resp_body = self.send_initial_request(user, 'blocked')
def test_reservation_success_with_nonlocal_address(self):
user = settings.NONLOCAL_RESERVATION_USERS[1]
# send initial request for reservation user
req_body, resp_body = self.send_initial_request(user, 'reservation')
# reply with correct answer
update_body_with_challenge(req_body, resp_body)
req_body['challengeAnswer'] = user['reservation']
resp = register_account(req_body)
self.assertEqual(resp.status_code, requests.codes.ok)
#TODO check user inbox
def test_reset_fail_on_wrong_answer(self):
user = settings.RESET_USERS[1]
#TODO add some messages
# send initial request for reset user
req_body, resp_body = self.send_initial_request(user, 'reset')
# reply with wrong answer
update_body_with_challenge(req_body, resp_body)
req_body['challengeAnswer'] = 'bad'
resp = register_account(req_body)
self.assertEqual(resp.status_code, requests.codes.forbidden)
#TODO check that old login still works
#TODO check that old messages are still there
def test_reset_success(self):
user = settings.RESET_USERS[1]
#TODO add some messages
# send initial request for reset user
req_body, resp_body = self.send_initial_request(user, 'reset')
# reply with correct answer
update_body_with_challenge(req_body, resp_body)
req_body['challengeAnswer'] = user['reset_code']
resp = register_account(req_body)
self.assertEqual(resp.status_code, requests.codes.ok)
#TODO check that new login works
#TODO check that messages are deleted
| 35.8
| 76
| 0.661276
|
4a061d80ad25b36712618e4a697833d1550b8fd9
| 78,652
|
py
|
Python
|
pymc3/sampling.py
|
Sooner0931/pymc3
|
875efa0d3bb4ef682b736f92816a75fc378d5a6e
|
[
"Apache-2.0"
] | 1
|
2020-09-05T05:52:09.000Z
|
2020-09-05T05:52:09.000Z
|
pymc3/sampling.py
|
pgerramirez/pymc3
|
458e513e47ed764c1ec4efcfce50ea7bd9fefbfd
|
[
"Apache-2.0"
] | null | null | null |
pymc3/sampling.py
|
pgerramirez/pymc3
|
458e513e47ed764c1ec4efcfce50ea7bd9fefbfd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for MCMC sampling."""
from typing import Dict, List, Optional, TYPE_CHECKING, cast, Union, Any
if TYPE_CHECKING:
from typing import Tuple
from typing import Iterable as TIterable
from collections.abc import Iterable
from collections import defaultdict
from copy import copy
import packaging
import pickle
import logging
import time
import warnings
import arviz
from arviz import InferenceData
import numpy as np
import theano.gradient as tg
from theano.tensor import Tensor
import xarray
from .backends.base import BaseTrace, MultiTrace
from .backends.ndarray import NDArray
from .distributions.distribution import draw_values
from .distributions.posterior_predictive import fast_sample_posterior_predictive
from .model import modelcontext, Point, all_continuous, Model
from .step_methods import (
NUTS,
HamiltonianMC,
Metropolis,
BinaryMetropolis,
BinaryGibbsMetropolis,
CategoricalGibbsMetropolis,
DEMetropolis,
Slice,
CompoundStep,
arraystep,
)
from .util import (
update_start_vals,
get_untransformed_name,
is_transformed_name,
get_default_varnames,
dataset_to_point_dict,
chains_and_samples,
)
from .vartypes import discrete_types
from .exceptions import IncorrectArgumentsError
from .parallel_sampling import _cpu_count, Draw
from pymc3.step_methods.hmc import quadpotential
import pymc3 as pm
from fastprogress.fastprogress import progress_bar
import sys
sys.setrecursionlimit(10000)
__all__ = [
"sample",
"iter_sample",
"sample_posterior_predictive",
"sample_posterior_predictive_w",
"init_nuts",
"sample_prior_predictive",
"fast_sample_posterior_predictive",
]
STEP_METHODS = (
NUTS,
HamiltonianMC,
Metropolis,
BinaryMetropolis,
BinaryGibbsMetropolis,
Slice,
CategoricalGibbsMetropolis,
)
ArrayLike = Union[np.ndarray, List[float]]
PointType = Dict[str, np.ndarray]
PointList = List[PointType]
_log = logging.getLogger("pymc3")
def instantiate_steppers(_model, steps, selected_steps, step_kwargs=None):
"""Instantiate steppers assigned to the model variables.
This function is intended to be called automatically from ``sample()``, but
may be called manually.
Parameters
----------
model : Model object
A fully-specified model object; legacy argument -- ignored
steps : step function or vector of step functions
One or more step functions that have been assigned to some subset of
the model's parameters. Defaults to None (no assigned variables).
selected_steps : dictionary of step methods and variables
        The step methods and the variables that were assigned to them.
step_kwargs : dict
Parameters for the samplers. Keys are the lower case names of
the step method, values a dict of arguments.
Returns
-------
methods : list
List of step methods associated with the model's variables.
"""
if step_kwargs is None:
step_kwargs = {}
used_keys = set()
for step_class, vars in selected_steps.items():
if len(vars) == 0:
continue
args = step_kwargs.get(step_class.name, {})
used_keys.add(step_class.name)
step = step_class(vars=vars, **args)
steps.append(step)
unused_args = set(step_kwargs).difference(used_keys)
if unused_args:
raise ValueError("Unused step method arguments: %s" % unused_args)
if len(steps) == 1:
steps = steps[0]
return steps
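# --- Added example (not part of the original module) ---
# Shape of the `step_kwargs` mapping consumed above: keys are the lower case
# step method names, values are keyword arguments for that sampler, e.g.
#
#   step_kwargs = {"nuts": {"target_accept": 0.9},
#                  "metropolis": {"scaling": 0.5}}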
def assign_step_methods(model, step=None, methods=STEP_METHODS, step_kwargs=None):
"""Assign model variables to appropriate step methods.
Passing a specified model will auto-assign its constituent stochastic
variables to step methods based on the characteristics of the variables.
This function is intended to be called automatically from ``sample()``, but
may be called manually. Each step method passed should have a
``competence()`` method that returns an ordinal competence value
corresponding to the variable passed to it. This value quantifies the
appropriateness of the step method for sampling the variable.
Parameters
----------
model : Model object
A fully-specified model object
step : step function or vector of step functions
One or more step functions that have been assigned to some subset of
the model's parameters. Defaults to ``None`` (no assigned variables).
methods : vector of step method classes
The set of step methods from which the function may choose. Defaults
to the main step methods provided by PyMC3.
step_kwargs : dict
Parameters for the samplers. Keys are the lower case names of
the step method, values a dict of arguments.
Returns
-------
methods : list
List of step methods associated with the model's variables.
"""
steps = []
assigned_vars = set()
if step is not None:
try:
steps += list(step)
except TypeError:
steps.append(step)
for step in steps:
try:
assigned_vars = assigned_vars.union(set(step.vars))
except AttributeError:
for method in step.methods:
assigned_vars = assigned_vars.union(set(method.vars))
# Use competence classmethods to select step methods for remaining
# variables
selected_steps = defaultdict(list)
for var in model.free_RVs:
if var not in assigned_vars:
# determine if a gradient can be computed
has_gradient = var.dtype not in discrete_types
if has_gradient:
try:
tg.grad(model.logpt, var)
except (AttributeError, NotImplementedError, tg.NullTypeGradError):
has_gradient = False
# select the best method
selected = max(
methods,
key=lambda method, var=var, has_gradient=has_gradient: method._competence(
var, has_gradient
),
)
selected_steps[selected].append(var)
return instantiate_steppers(model, steps, selected_steps, step_kwargs)
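# --- Added example (not part of the original module) ---
# A sketch of the competence-based assignment above for a model with one
# continuous and one discrete free variable: NUTS would typically be chosen
# for `mu` and a Metropolis-family sampler for `k`.
#
#   with pm.Model() as model:
#       mu = pm.Normal("mu", 0.0, 1.0)
#       k = pm.Poisson("k", 3.0)
#       step = assign_step_methods(model)  # e.g. NUTS for mu, Metropolis for k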
def _print_step_hierarchy(s, level=0):
if isinstance(s, (list, tuple)):
_log.info(">" * level + "list")
for i in s:
_print_step_hierarchy(i, level + 1)
elif isinstance(s, CompoundStep):
_log.info(">" * level + "CompoundStep")
for i in s.methods:
_print_step_hierarchy(i, level + 1)
else:
varnames = ", ".join(
[
get_untransformed_name(v.name) if is_transformed_name(v.name) else v.name
for v in s.vars
]
)
_log.info(">" * level + "{}: [{}]".format(s.__class__.__name__, varnames))
def sample(
draws=1000,
step=None,
init="auto",
n_init=200000,
start=None,
trace=None,
chain_idx=0,
chains=None,
cores=None,
tune=1000,
progressbar=True,
model=None,
random_seed=None,
discard_tuned_samples=True,
compute_convergence_checks=True,
callback=None,
*,
return_inferencedata=None,
idata_kwargs: dict = None,
mp_ctx=None,
pickle_backend: str = "pickle",
**kwargs,
):
"""Draw samples from the posterior using the given step methods.
Multiple step methods are supported via compound step methods.
Parameters
----------
draws : int
The number of samples to draw. Defaults to 1000. The number of tuned samples are discarded
by default. See ``discard_tuned_samples``.
init : str
Initialization method to use for auto-assigned NUTS samplers.
* auto: Choose a default initialization method automatically.
Currently, this is ``jitter+adapt_diag``, but this can change in the future.
If you depend on the exact behaviour, choose an initialization method explicitly.
        * adapt_diag: Start with an identity mass matrix and then adapt a diagonal based on the
variance of the tuning samples. All chains use the test value (usually the prior mean)
as starting point.
* jitter+adapt_diag: Same as ``adapt_diag``, but add uniform jitter in [-1, 1] to the
starting point in each chain.
* advi+adapt_diag: Run ADVI and then adapt the resulting diagonal mass matrix based on the
sample variance of the tuning samples.
* advi+adapt_diag_grad: Run ADVI and then adapt the resulting diagonal mass matrix based
on the variance of the gradients during tuning. This is **experimental** and might be
removed in a future release.
* advi: Run ADVI to estimate posterior mean and diagonal mass matrix.
* advi_map: Initialize ADVI with MAP and use MAP as starting point.
* map: Use the MAP as starting point. This is discouraged.
* adapt_full: Adapt a dense mass matrix using the sample covariances
step : function or iterable of functions
A step function or collection of functions. If there are variables without step methods,
step methods for those variables will be assigned automatically. By default the NUTS step
method will be used, if appropriate to the model; this is a good default for beginning
users.
n_init : int
Number of iterations of initializer. Only works for 'ADVI' init methods.
start : dict, or array of dict
Starting point in parameter space (or partial point)
Defaults to ``trace.point(-1))`` if there is a trace provided and model.test_point if not
(defaults to empty dict). Initialization methods for NUTS (see ``init`` keyword) can
overwrite the default.
trace : backend, list, or MultiTrace
This should be a backend instance, a list of variables to track, or a MultiTrace object
with past values. If a MultiTrace object is given, it must contain samples for the chain
number ``chain``. If None or a list of variables, the NDArray backend is used.
Passing either "text" or "sqlite" is taken as a shortcut to set up the corresponding
backend (with "mcmc" used as the base name).
chain_idx : int
Chain number used to store sample in backend. If ``chains`` is greater than one, chain
numbers will start here.
chains : int
The number of chains to sample. Running independent chains is important for some
convergence statistics and can also reveal multiple modes in the posterior. If ``None``,
then set to either ``cores`` or 2, whichever is larger.
cores : int
The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
system, but at most 4.
tune : int
Number of iterations to tune, defaults to 1000. Samplers adjust the step sizes, scalings or
similar during tuning. Tuning samples will be drawn in addition to the number specified in
the ``draws`` argument, and will be discarded unless ``discard_tuned_samples`` is set to
False.
progressbar : bool, optional default=True
Whether or not to display a progress bar in the command line. The bar shows the percentage
of completion, the sampling speed in samples per second (SPS), and the estimated remaining
time until completion ("expected time of arrival"; ETA).
model : Model (optional if in ``with`` context)
random_seed : int or list of ints
A list is accepted if ``cores`` is greater than one.
discard_tuned_samples : bool
Whether to discard posterior samples of the tune interval.
compute_convergence_checks : bool, default=True
Whether to compute sampler statistics like Gelman-Rubin and ``effective_n``.
callback : function, default=None
A function which gets called for every sample from the trace of a chain. The function is
called with the trace and the current draw and will contain all samples for a single trace.
the ``draw.chain`` argument can be used to determine which of the active chains the sample
is drawn from.
Sampling can be interrupted by throwing a ``KeyboardInterrupt`` in the callback.
return_inferencedata : bool, default=False
Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object or a `MultiTrace` (False)
Defaults to `False`, but we'll switch to `True` in an upcoming release.
idata_kwargs : dict, optional
Keyword arguments for :func:`arviz:arviz.from_pymc3`
mp_ctx : multiprocessing.context.BaseContent
A multiprocessing context for parallel sampling. See multiprocessing
documentation for details.
pickle_backend : str
One of `'pickle'` or `'dill'`. The library used to pickle models
in parallel sampling if the multiprocessing context is not of type
`fork`.
Returns
-------
trace : pymc3.backends.base.MultiTrace or arviz.InferenceData
A ``MultiTrace`` or ArviZ ``InferenceData`` object that contains the samples.
Notes
-----
Optional keyword arguments can be passed to ``sample`` to be delivered to the
``step_method``s used during sampling.
If your model uses only one step method, you can address step method kwargs
directly. In particular, the NUTS step method has several options including:
* target_accept : float in [0, 1]. The step size is tuned such that we
approximate this acceptance rate. Higher values like 0.9 or 0.95 often
work better for problematic posteriors
* max_treedepth : The maximum depth of the trajectory tree
* step_scale : float, default 0.25
The initial guess for the step size scaled down by :math:`1/n**(1/4)`
If your model uses multiple step methods, aka a Compound Step, then you have
two ways to address arguments to each step method:
A: If you let ``sample()`` automatically assign the ``step_method``s,
and you can correctly anticipate what they will be, then you can wrap
step method kwargs in a dict and pass that to sample() with a kwarg set
to the name of the step method.
e.g. for a CompoundStep comprising NUTS and BinaryGibbsMetropolis,
you could send:
1. ``target_accept`` to NUTS: nuts={'target_accept':0.9}
2. ``transit_p`` to BinaryGibbsMetropolis: binary_gibbs_metropolis={'transit_p':.7}
Note that available names are:
``nuts``, ``hmc``, ``metropolis``, ``binary_metropolis``,
``binary_gibbs_metropolis``, ``categorical_gibbs_metropolis``,
``DEMetropolis``, ``DEMetropolisZ``, ``slice``
B: If you manually declare the ``step_method``s, within the ``step``
kwarg, then you can address the ``step_method`` kwargs directly.
e.g. for a CompoundStep comprising NUTS and BinaryGibbsMetropolis,
you could send:
step=[pm.NUTS([freeRV1, freeRV2], target_accept=0.9),
pm.BinaryGibbsMetropolis([freeRV3], transit_p=.7)]
You can find a full list of arguments in the docstring of the step methods.
Examples
--------
.. code:: ipython
>>> import pymc3 as pm
... n = 100
... h = 61
... alpha = 2
... beta = 2
.. code:: ipython
>>> with pm.Model() as model: # context management
... p = pm.Beta('p', alpha=alpha, beta=beta)
... y = pm.Binomial('y', n=n, p=p, observed=h)
... trace = pm.sample()
>>> pm.summary(trace)
mean sd mc_error hpd_2.5 hpd_97.5
p 0.604625 0.047086 0.00078 0.510498 0.694774
"""
model = modelcontext(model)
if cores is None:
cores = min(4, _cpu_count())
if chains is None:
chains = max(2, cores)
if isinstance(start, dict):
start = [start] * chains
if random_seed == -1:
random_seed = None
if chains == 1 and isinstance(random_seed, int):
random_seed = [random_seed]
if random_seed is None or isinstance(random_seed, int):
if random_seed is not None:
np.random.seed(random_seed)
random_seed = [np.random.randint(2 ** 30) for _ in range(chains)]
if not isinstance(random_seed, Iterable):
raise TypeError("Invalid value for `random_seed`. Must be tuple, list or int")
if not discard_tuned_samples and not return_inferencedata:
warnings.warn(
"Tuning samples will be included in the returned `MultiTrace` object, which can lead to"
" complications in your downstream analysis. Please consider to switch to `InferenceData`:\n"
"`pm.sample(..., return_inferencedata=True)`",
UserWarning,
)
if return_inferencedata is None:
v = packaging.version.parse(pm.__version__)
if v.release[0] > 3 or v.release[1] >= 10:
warnings.warn(
"In an upcoming release, pm.sample will return an `arviz.InferenceData` object instead of a `MultiTrace` by default. "
"You can pass return_inferencedata=True or return_inferencedata=False to be safe and silence this warning.",
FutureWarning,
)
# set the default
return_inferencedata = False
if start is not None:
for start_vals in start:
_check_start_shape(model, start_vals)
# small trace warning
if draws == 0:
msg = "Tuning was enabled throughout the whole trace."
_log.warning(msg)
elif draws < 500:
msg = "Only %s samples in chain." % draws
_log.warning(msg)
draws += tune
if model.ndim == 0:
raise ValueError("The model does not contain any free variables.")
if step is None and init is not None and all_continuous(model.vars):
try:
# By default, try to use NUTS
_log.info("Auto-assigning NUTS sampler...")
start_, step = init_nuts(
init=init,
chains=chains,
n_init=n_init,
model=model,
random_seed=random_seed,
progressbar=progressbar,
**kwargs,
)
if start is None:
start = start_
except (AttributeError, NotImplementedError, tg.NullTypeGradError):
# gradient computation failed
_log.info("Initializing NUTS failed. " "Falling back to elementwise auto-assignment.")
            _log.debug("Exception in init nuts", exc_info=True)
step = assign_step_methods(model, step, step_kwargs=kwargs)
else:
step = assign_step_methods(model, step, step_kwargs=kwargs)
if isinstance(step, list):
step = CompoundStep(step)
if start is None:
start = {}
if isinstance(start, dict):
start = [start] * chains
sample_args = {
"draws": draws,
"step": step,
"start": start,
"trace": trace,
"chain": chain_idx,
"chains": chains,
"tune": tune,
"progressbar": progressbar,
"model": model,
"random_seed": random_seed,
"cores": cores,
"callback": callback,
"discard_tuned_samples": discard_tuned_samples,
}
parallel_args = {
"pickle_backend": pickle_backend,
"mp_ctx": mp_ctx,
}
sample_args.update(kwargs)
has_population_samplers = np.any(
[
isinstance(m, arraystep.PopulationArrayStepShared)
for m in (step.methods if isinstance(step, CompoundStep) else [step])
]
)
parallel = cores > 1 and chains > 1 and not has_population_samplers
t_start = time.time()
if parallel:
_log.info("Multiprocess sampling ({} chains in {} jobs)".format(chains, cores))
_print_step_hierarchy(step)
try:
trace = _mp_sample(**sample_args, **parallel_args)
except pickle.PickleError:
_log.warning("Could not pickle model, sampling singlethreaded.")
            _log.debug("Pickling error:", exc_info=True)
parallel = False
except AttributeError as e:
if str(e).startswith("AttributeError: Can't pickle"):
_log.warning("Could not pickle model, sampling singlethreaded.")
                _log.debug("Pickling error:", exc_info=True)
parallel = False
else:
raise
if not parallel:
if has_population_samplers:
has_demcmc = np.any(
[
isinstance(m, DEMetropolis)
for m in (step.methods if isinstance(step, CompoundStep) else [step])
]
)
_log.info("Population sampling ({} chains)".format(chains))
if has_demcmc and chains < 3:
raise ValueError(
"DEMetropolis requires at least 3 chains. "
"For this {}-dimensional model you should use ≥{} chains".format(
model.ndim, model.ndim + 1
)
)
if has_demcmc and chains <= model.ndim:
warnings.warn(
"DEMetropolis should be used with more chains than dimensions! "
"(The model has {} dimensions.)".format(model.ndim),
UserWarning,
)
_print_step_hierarchy(step)
trace = _sample_population(**sample_args, parallelize=cores > 1)
else:
_log.info("Sequential sampling ({} chains in 1 job)".format(chains))
_print_step_hierarchy(step)
trace = _sample_many(**sample_args)
t_sampling = time.time() - t_start
# count the number of tune/draw iterations that happened
# ideally via the "tune" statistic, but not all samplers record it!
if "tune" in trace.stat_names:
stat = trace.get_sampler_stats("tune", chains=0)
# when CompoundStep is used, the stat is 2 dimensional!
if len(stat.shape) == 2:
stat = stat[:, 0]
stat = tuple(stat)
n_tune = stat.count(True)
n_draws = stat.count(False)
else:
# these may be wrong when KeyboardInterrupt happened, but they're better than nothing
n_tune = min(tune, len(trace))
n_draws = max(0, len(trace) - n_tune)
if discard_tuned_samples:
trace = trace[n_tune:]
# save metadata in SamplerReport
trace.report._n_tune = n_tune
trace.report._n_draws = n_draws
trace.report._t_sampling = t_sampling
n_chains = len(trace.chains)
_log.info(
f'Sampling {n_chains} chain{"s" if n_chains > 1 else ""} for {n_tune:_d} tune and {n_draws:_d} draw iterations '
f"({n_tune*n_chains:_d} + {n_draws*n_chains:_d} draws total) "
f"took {trace.report.t_sampling:.0f} seconds."
)
idata = None
if compute_convergence_checks or return_inferencedata:
ikwargs = dict(model=model, save_warmup=not discard_tuned_samples)
if idata_kwargs:
ikwargs.update(idata_kwargs)
idata = arviz.from_pymc3(trace, **ikwargs)
if compute_convergence_checks:
if draws - tune < 100:
warnings.warn("The number of samples is too small to check convergence reliably.")
else:
trace.report._run_convergence_checks(idata, model)
trace.report._log_summary()
if return_inferencedata:
return idata
else:
return trace
def _check_start_shape(model, start):
if not isinstance(start, dict):
raise TypeError("start argument must be a dict or an array-like of dicts")
e = ""
for var in model.vars:
if var.name in start.keys():
var_shape = var.shape.tag.test_value
start_var_shape = np.shape(start[var.name])
if start_var_shape:
if not np.array_equal(var_shape, start_var_shape):
e += "\nExpected shape {} for var '{}', got: {}".format(
tuple(var_shape), var.name, start_var_shape
)
# if start var has no shape
else:
# if model var has a specified shape
if var_shape.size > 0:
e += "\nExpected shape {} for var " "'{}', got scalar {}".format(
tuple(var_shape), var.name, start[var.name]
)
if e != "":
raise ValueError("Bad shape for start argument:{}".format(e))
def _sample_many(
draws, chain: int, chains: int, start: list, random_seed: list, step, callback=None, **kwargs,
):
"""Samples all chains sequentially.
Parameters
----------
draws: int
The number of samples to draw
chain: int
Number of the first chain in the sequence.
chains: int
Total number of chains to sample.
start: list
Starting points for each chain
random_seed: list
A list of seeds, one for each chain
step: function
Step function
Returns
-------
trace: MultiTrace
Contains samples of all chains
"""
traces = []
for i in range(chains):
trace = _sample(
draws=draws,
chain=chain + i,
start=start[i],
step=step,
random_seed=random_seed[i],
callback=callback,
**kwargs,
)
if trace is None:
if len(traces) == 0:
raise ValueError("Sampling stopped before a sample was created.")
else:
break
elif len(trace) < draws:
if len(traces) == 0:
traces.append(trace)
break
else:
traces.append(trace)
return MultiTrace(traces)
def _sample_population(
draws: int,
chain: int,
chains: int,
start,
random_seed,
step,
tune,
model,
progressbar: bool = True,
parallelize=False,
**kwargs,
):
"""Performs sampling of a population of chains using the ``PopulationStepper``.
Parameters
----------
draws : int
The number of samples to draw
chain : int
The number of the first chain in the population
chains : int
The total number of chains in the population
start : list
Start points for each chain
random_seed : int or list of ints, optional
        A list is accepted if ``cores`` is greater than one.
step : function
Step function (should be or contain a population step method)
tune : int, optional
Number of iterations to tune, if applicable (defaults to None)
model : Model (optional if in ``with`` context)
progressbar : bool
Show progress bars? (defaults to True)
parallelize : bool
Setting for multiprocess parallelization
Returns
-------
trace : MultiTrace
Contains samples of all chains
"""
# create the generator that iterates all chains in parallel
chains = [chain + c for c in range(chains)]
sampling = _prepare_iter_population(
draws,
chains,
step,
start,
parallelize,
tune=tune,
model=model,
random_seed=random_seed,
progressbar=progressbar,
)
if progressbar:
sampling = progress_bar(sampling, total=draws, display=progressbar)
latest_traces = None
for it, traces in enumerate(sampling):
latest_traces = traces
return MultiTrace(latest_traces)
def _sample(
chain: int,
progressbar: bool,
random_seed,
start,
draws: int,
step=None,
trace=None,
tune=None,
model: Optional[Model] = None,
callback=None,
**kwargs,
):
"""Main iteration for singleprocess sampling.
Multiple step methods are supported via compound step methods.
Parameters
----------
chain : int
Number of the chain that the samples will belong to.
progressbar : bool
Whether or not to display a progress bar in the command line. The bar shows the percentage
of completion, the sampling speed in samples per second (SPS), and the estimated remaining
time until completion ("expected time of arrival"; ETA).
random_seed : int or list of ints
A list is accepted if ``cores`` is greater than one.
start : dict
Starting point in parameter space (or partial point)
draws : int
The number of samples to draw
step : function
Step function
trace : backend, list, or MultiTrace
This should be a backend instance, a list of variables to track, or a MultiTrace object
with past values. If a MultiTrace object is given, it must contain samples for the chain
number ``chain``. If None or a list of variables, the NDArray backend is used.
tune : int, optional
Number of iterations to tune, if applicable (defaults to None)
model : Model (optional if in ``with`` context)
Returns
-------
strace : pymc3.backends.base.BaseTrace
A ``BaseTrace`` object that contains the samples for this chain.
"""
skip_first = kwargs.get("skip_first", 0)
sampling = _iter_sample(draws, step, start, trace, chain, tune, model, random_seed, callback)
_pbar_data = {"chain": chain, "divergences": 0}
_desc = "Sampling chain {chain:d}, {divergences:,d} divergences"
if progressbar:
sampling = progress_bar(sampling, total=draws, display=progressbar)
sampling.comment = _desc.format(**_pbar_data)
try:
strace = None
for it, (strace, diverging) in enumerate(sampling):
if it >= skip_first and diverging:
_pbar_data["divergences"] += 1
if progressbar:
sampling.comment = _desc.format(**_pbar_data)
except KeyboardInterrupt:
pass
return strace
def iter_sample(
draws: int,
step,
start: Optional[Dict[Any, Any]] = None,
trace=None,
chain=0,
tune: Optional[int] = None,
model: Optional[Model] = None,
random_seed: Optional[Union[int, List[int]]] = None,
callback=None,
):
"""Generate a trace on each iteration using the given step method.
    Multiple step methods are supported via compound step methods.
Parameters
----------
draws : int
The number of samples to draw
step : function
Step function
start : dict
Starting point in parameter space (or partial point). Defaults to trace.point(-1)) if
there is a trace provided and model.test_point if not (defaults to empty dict)
trace : backend, list, or MultiTrace
This should be a backend instance, a list of variables to track, or a MultiTrace object
with past values. If a MultiTrace object is given, it must contain samples for the chain
number ``chain``. If None or a list of variables, the NDArray backend is used.
chain : int, optional
Chain number used to store sample in backend. If ``cores`` is greater than one, chain numbers
will start here.
tune : int, optional
Number of iterations to tune, if applicable (defaults to None)
model : Model (optional if in ``with`` context)
random_seed : int or list of ints, optional
        A list is accepted if ``cores`` is greater than one.
callback :
A function which gets called for every sample from the trace of a chain. The function is
called with the trace and the current draw and will contain all samples for a single trace.
the ``draw.chain`` argument can be used to determine which of the active chains the sample
is drawn from.
Sampling can be interrupted by throwing a ``KeyboardInterrupt`` in the callback.
Yields
------
trace : MultiTrace
Contains all samples up to the current iteration
Examples
--------
::
for trace in iter_sample(500, step):
...
"""
sampling = _iter_sample(draws, step, start, trace, chain, tune, model, random_seed, callback)
for i, (strace, _) in enumerate(sampling):
yield MultiTrace([strace[: i + 1]])
def _iter_sample(
draws,
step,
start=None,
trace=None,
chain=0,
tune=None,
model=None,
random_seed=None,
callback=None,
):
"""Generator for sampling one chain. (Used in singleprocess sampling.)
Parameters
----------
draws : int
The number of samples to draw
step : function
Step function
start : dict, optional
Starting point in parameter space (or partial point). Defaults to trace.point(-1)) if
there is a trace provided and model.test_point if not (defaults to empty dict)
trace : backend, list, MultiTrace, or None
This should be a backend instance, a list of variables to track, or a MultiTrace object
with past values. If a MultiTrace object is given, it must contain samples for the chain
number ``chain``. If None or a list of variables, the NDArray backend is used.
chain : int, optional
Chain number used to store sample in backend. If ``cores`` is greater than one, chain numbers
will start here.
tune : int, optional
Number of iterations to tune, if applicable (defaults to None)
model : Model (optional if in ``with`` context)
random_seed : int or list of ints, optional
        A list is accepted if ``cores`` is greater than one.
Yields
------
strace : BaseTrace
The trace object containing the samples for this chain
diverging : bool
Indicates if the draw is divergent. Only available with some samplers.
"""
model = modelcontext(model)
draws = int(draws)
if random_seed is not None:
np.random.seed(random_seed)
if draws < 1:
raise ValueError("Argument `draws` must be greater than 0.")
if start is None:
start = {}
strace = _choose_backend(trace, chain, model=model)
if len(strace) > 0:
update_start_vals(start, strace.point(-1), model)
else:
update_start_vals(start, model.test_point, model)
try:
step = CompoundStep(step)
except TypeError:
pass
point = Point(start, model=model)
if step.generates_stats and strace.supports_sampler_stats:
strace.setup(draws, chain, step.stats_dtypes)
else:
strace.setup(draws, chain)
try:
step.tune = bool(tune)
if hasattr(step, "reset_tuning"):
step.reset_tuning()
for i in range(draws):
stats = None
diverging = False
if i == 0 and hasattr(step, "iter_count"):
step.iter_count = 0
if i == tune:
step = stop_tuning(step)
if step.generates_stats:
point, stats = step.step(point)
if strace.supports_sampler_stats:
strace.record(point, stats)
diverging = i > tune and stats and stats[0].get("diverging")
else:
strace.record(point)
else:
point = step.step(point)
strace.record(point)
if callback is not None:
warns = getattr(step, "warnings", None)
callback(
trace=strace, draw=Draw(chain, i == draws, i, i < tune, stats, point, warns),
)
yield strace, diverging
except KeyboardInterrupt:
strace.close()
if hasattr(step, "warnings"):
warns = step.warnings()
strace._add_warnings(warns)
raise
except BaseException:
strace.close()
raise
else:
strace.close()
if hasattr(step, "warnings"):
warns = step.warnings()
strace._add_warnings(warns)
class PopulationStepper:
"""Wraps population of step methods to step them in parallel with single or multiprocessing."""
def __init__(self, steppers, parallelize, progressbar=True):
"""Use multiprocessing to parallelize chains.
Falls back to sequential evaluation if multiprocessing fails.
In the multiprocessing mode of operation, a new process is started for each
chain/stepper and Pipes are used to communicate with the main process.
Parameters
----------
steppers : list
A collection of independent step methods, one for each chain.
parallelize : bool
Indicates if parallelization via multiprocessing is desired.
progressbar : bool
Should we display a progress bar showing relative progress?
"""
self.nchains = len(steppers)
self.is_parallelized = False
self._primary_ends = []
self._processes = []
self._steppers = steppers
if parallelize:
try:
# configure a child process for each stepper
_log.info(
"Attempting to parallelize chains to all cores. You can turn this off with `pm.sample(cores=1)`."
)
import multiprocessing
for c, stepper in (
enumerate(progress_bar(steppers)) if progressbar else enumerate(steppers)
):
secondary_end, primary_end = multiprocessing.Pipe()
stepper_dumps = pickle.dumps(stepper, protocol=4)
process = multiprocessing.Process(
target=self.__class__._run_secondary,
args=(c, stepper_dumps, secondary_end),
name="ChainWalker{}".format(c),
)
# we want the child process to exit if the parent is terminated
process.daemon = True
# Starting the process might fail and takes time.
# By doing it in the constructor, the sampling progress bar
# will not be confused by the process start.
process.start()
self._primary_ends.append(primary_end)
self._processes.append(process)
self.is_parallelized = True
except Exception:
_log.info(
"Population parallelization failed. "
"Falling back to sequential stepping of chains."
)
_log.debug("Error was: ", exec_info=True)
else:
_log.info(
"Chains are not parallelized. You can enable this by passing "
"`pm.sample(cores=n)`, where n > 1."
)
return super().__init__()
def __enter__(self):
"""Do nothing: processes are already started in ``__init__``."""
return
def __exit__(self, exc_type, exc_val, exc_tb):
if len(self._processes) > 0:
try:
for primary_end in self._primary_ends:
primary_end.send(None)
for process in self._processes:
process.join(timeout=3)
except Exception:
_log.warning("Termination failed.")
return
@staticmethod
def _run_secondary(c, stepper_dumps, secondary_end):
"""This method is started on a separate process to perform stepping of a chain.
Parameters
----------
c : int
number of this chain
        stepper_dumps : bytes
            the pickled step method (e.g. a CompoundStep) used to step this chain
secondary_end : multiprocessing.connection.PipeConnection
This is our connection to the main process
"""
# re-seed each child process to make them unique
np.random.seed(None)
try:
stepper = pickle.loads(stepper_dumps)
# the stepper is not necessarily a PopulationArraySharedStep itself,
# but rather a CompoundStep. PopulationArrayStepShared.population
# has to be updated, therefore we identify the substeppers first.
population_steppers = []
for sm in stepper.methods if isinstance(stepper, CompoundStep) else [stepper]:
if isinstance(sm, arraystep.PopulationArrayStepShared):
population_steppers.append(sm)
while True:
incoming = secondary_end.recv()
# receiving a None is the signal to exit
if incoming is None:
break
tune_stop, population = incoming
if tune_stop:
stop_tuning(stepper)
# forward the population to the PopulationArrayStepShared objects
# This is necessary because due to the process fork, the population
# object is no longer shared between the steppers.
for popstep in population_steppers:
popstep.population = population
update = stepper.step(population[c])
secondary_end.send(update)
except Exception:
_log.exception("ChainWalker{}".format(c))
return
def step(self, tune_stop, population):
"""Step the entire population of chains.
Parameters
----------
tune_stop : bool
Indicates if the condition (i == tune) is fulfilled
population : list
Current Points of all chains
Returns
-------
update : list
List of (Point, stats) tuples for all chains
"""
updates = [None] * self.nchains
if self.is_parallelized:
for c in range(self.nchains):
self._primary_ends[c].send((tune_stop, population))
# Blockingly get the step outcomes
for c in range(self.nchains):
updates[c] = self._primary_ends[c].recv()
else:
for c in range(self.nchains):
if tune_stop:
self._steppers[c] = stop_tuning(self._steppers[c])
updates[c] = self._steppers[c].step(population[c])
return updates
def _prepare_iter_population(
draws: int,
chains: list,
step,
start: list,
parallelize: bool,
tune=None,
model=None,
random_seed=None,
progressbar=True,
):
"""Prepare a PopulationStepper and traces for population sampling.
Parameters
----------
draws : int
The number of samples to draw
chains : list
The chain numbers in the population
step : function
Step function (should be or contain a population step method)
start : list
Start points for each chain
parallelize : bool
Setting for multiprocess parallelization
tune : int, optional
Number of iterations to tune, if applicable (defaults to None)
model : Model (optional if in ``with`` context)
random_seed : int or list of ints, optional
        A list is accepted if ``cores`` is greater than one.
progressbar : bool
``progressbar`` argument for the ``PopulationStepper``, (defaults to True)
Returns
-------
_iter_population : generator
Yields traces of all chains at the same time
"""
# chains contains the chain numbers, but for indexing we need indices...
nchains = len(chains)
model = modelcontext(model)
draws = int(draws)
if random_seed is not None:
np.random.seed(random_seed)
if draws < 1:
raise ValueError("Argument `draws` should be above 0.")
# The initialization of traces, samplers and points must happen in the right order:
# 1. traces are initialized and update_start_vals configures variable transforms
# 2. population of points is created
# 3. steppers are initialized and linked to the points object
# 4. traces are configured to track the sampler stats
# 5. a PopulationStepper is configured for parallelized stepping
# 1. prepare a BaseTrace for each chain
traces = [_choose_backend(None, chain, model=model) for chain in chains]
for c, strace in enumerate(traces):
# initialize the trace size and variable transforms
if len(strace) > 0:
update_start_vals(start[c], strace.point(-1), model)
else:
update_start_vals(start[c], model.test_point, model)
# 2. create a population (points) that tracks each chain
# it is updated as the chains are advanced
population = [Point(start[c], model=model) for c in range(nchains)]
# 3. Set up the steppers
steppers = [None] * nchains
for c in range(nchains):
        # need independent samplers for each chain
# it is important to copy the actual steppers (but not the delta_logp)
if isinstance(step, CompoundStep):
chainstep = CompoundStep([copy(m) for m in step.methods])
else:
chainstep = copy(step)
# link population samplers to the shared population state
for sm in chainstep.methods if isinstance(step, CompoundStep) else [chainstep]:
if isinstance(sm, arraystep.PopulationArrayStepShared):
sm.link_population(population, c)
steppers[c] = chainstep
# 4. configure tracking of sampler stats
for c in range(nchains):
if steppers[c].generates_stats and traces[c].supports_sampler_stats:
traces[c].setup(draws, c, steppers[c].stats_dtypes)
else:
traces[c].setup(draws, c)
# 5. configure the PopulationStepper (expensive call)
popstep = PopulationStepper(steppers, parallelize, progressbar=progressbar)
# Because the preparations above are expensive, the actual iterator is
# in another method. This way the progbar will not be disturbed.
return _iter_population(draws, tune, popstep, steppers, traces, population)
def _iter_population(draws, tune, popstep, steppers, traces, points):
"""Iterate a ``PopulationStepper``.
Parameters
----------
draws : int
number of draws per chain
tune : int
number of tuning steps
popstep : PopulationStepper
the helper object for (parallelized) stepping of chains
steppers : list
The step methods for each chain
traces : list
Traces for each chain
points : list
population of chain states
Yields
------
traces : list
List of trace objects of the individual chains
"""
try:
with popstep:
# iterate draws of all chains
for i in range(draws):
# this call steps all chains and returns a list of (point, stats)
# the `popstep` may interact with subprocesses internally
updates = popstep.step(i == tune, points)
# apply the update to the points and record to the traces
for c, strace in enumerate(traces):
if steppers[c].generates_stats:
points[c], stats = updates[c]
if strace.supports_sampler_stats:
strace.record(points[c], stats)
else:
strace.record(points[c])
else:
points[c] = updates[c]
strace.record(points[c])
# yield the state of all chains in parallel
yield traces
except KeyboardInterrupt:
for c, strace in enumerate(traces):
strace.close()
if hasattr(steppers[c], "report"):
steppers[c].report._finalize(strace)
raise
except BaseException:
for c, strace in enumerate(traces):
strace.close()
raise
else:
for c, strace in enumerate(traces):
strace.close()
if hasattr(steppers[c], "report"):
steppers[c].report._finalize(strace)
def _choose_backend(trace, chain, shortcuts=None, **kwds):
"""Selects or creates a trace backend (NDArray, Text, etc) for a particular chain.
Parameters
----------
trace : backend, list, MultiTrace, or None
This should be a BaseTrace, backend name (e.g. text, sqlite, or hdf5),
list of variables to track, or a MultiTrace object with past values.
If a MultiTrace object is given, it must contain samples for the chain number ``chain``.
If None or a list of variables, the NDArray backend is used.
chain : int
Number of the chain of interest.
shortcuts : dict, optional
maps backend names to a dict of backend class and name (defaults to pm.backends._shortcuts)
**kwds :
keyword arguments to forward to the backend creation
Returns
-------
trace : BaseTrace
A trace object for the selected chain
"""
if isinstance(trace, BaseTrace):
return trace
if isinstance(trace, MultiTrace):
return trace._straces[chain]
if trace is None:
return NDArray(**kwds)
if shortcuts is None:
shortcuts = pm.backends._shortcuts
try:
backend = shortcuts[trace]["backend"]
name = shortcuts[trace]["name"]
return backend(name, **kwds)
except TypeError:
return NDArray(vars=trace, **kwds)
except KeyError:
raise ValueError("Argument `trace` is invalid.")
def _mp_sample(
draws: int,
tune: int,
step,
chains: int,
cores: int,
chain: int,
random_seed: list,
start: list,
progressbar=True,
trace=None,
model=None,
callback=None,
discard_tuned_samples=True,
mp_ctx=None,
pickle_backend="pickle",
**kwargs,
):
"""Main iteration for multiprocess sampling.
Parameters
----------
draws : int
The number of samples to draw
tune : int, optional
Number of iterations to tune, if applicable (defaults to None)
step : function
Step function
chains : int
The number of chains to sample.
cores : int
The number of chains to run in parallel.
chain : int
Number of the first chain.
random_seed : list of ints
Random seeds for each chain.
start : list
Starting points for each chain.
progressbar : bool
Whether or not to display a progress bar in the command line.
trace : backend, list, MultiTrace or None
This should be a backend instance, a list of variables to track, or a MultiTrace object
with past values. If a MultiTrace object is given, it must contain samples for the chain
number ``chain``. If None or a list of variables, the NDArray backend is used.
model : Model (optional if in ``with`` context)
callback : Callable
A function which gets called for every sample from the trace of a chain. The function is
        called with the trace and the current draw; the trace will contain all samples for a single chain.
        The ``draw.chain`` argument can be used to determine which of the active chains the sample
is drawn from.
Sampling can be interrupted by throwing a ``KeyboardInterrupt`` in the callback.
Returns
-------
trace : pymc3.backends.base.MultiTrace
A ``MultiTrace`` object that contains the samples for all chains.
"""
import pymc3.parallel_sampling as ps
# We did draws += tune in pm.sample
draws -= tune
traces = []
for idx in range(chain, chain + chains):
if trace is not None:
strace = _choose_backend(copy(trace), idx, model=model)
else:
strace = _choose_backend(None, idx, model=model)
        # for user-supplied start values, fill in missing values if the supplied
        # dict does not contain all parameters
update_start_vals(start[idx - chain], model.test_point, model)
if step.generates_stats and strace.supports_sampler_stats:
strace.setup(draws + tune, idx + chain, step.stats_dtypes)
else:
strace.setup(draws + tune, idx + chain)
traces.append(strace)
sampler = ps.ParallelSampler(
draws,
tune,
chains,
cores,
random_seed,
start,
step,
chain,
progressbar,
mp_ctx=mp_ctx,
pickle_backend=pickle_backend,
)
try:
try:
with sampler:
for draw in sampler:
trace = traces[draw.chain - chain]
if trace.supports_sampler_stats and draw.stats is not None:
trace.record(draw.point, draw.stats)
else:
trace.record(draw.point)
if draw.is_last:
trace.close()
if draw.warnings is not None:
trace._add_warnings(draw.warnings)
if callback is not None:
callback(trace=trace, draw=draw)
except ps.ParallelSamplingError as error:
trace = traces[error._chain - chain]
trace._add_warnings(error._warnings)
for trace in traces:
trace.close()
multitrace = MultiTrace(traces)
multitrace._report._log_summary()
raise
return MultiTrace(traces)
except KeyboardInterrupt:
if discard_tuned_samples:
traces, length = _choose_chains(traces, tune)
else:
traces, length = _choose_chains(traces, 0)
return MultiTrace(traces)[:length]
finally:
for trace in traces:
trace.close()
def _choose_chains(traces, tune):
if tune is None:
tune = 0
if not traces:
return []
lengths = [max(0, len(trace) - tune) for trace in traces]
if not sum(lengths):
raise ValueError("Not enough samples to build a trace.")
idxs = np.argsort(lengths)[::-1]
l_sort = np.array(lengths)[idxs]
final_length = l_sort[0]
last_total = 0
for i, length in enumerate(l_sort):
total = (i + 1) * length
if total < last_total:
use_until = i
break
last_total = total
final_length = length
else:
use_until = len(lengths)
return [traces[idx] for idx in idxs[:use_until]], final_length + tune
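# Worked example of the heuristic above (hypothetical numbers): with tune=500 and
# chains of raw lengths [1500, 1100, 600], the post-tune lengths are [1000, 600, 100].
# Keeping only the longest chain yields 1000 draws, keeping the two longest yields
# 2 * 600 = 1200, while keeping all three yields only 3 * 100 = 300. The two longest
# chains are therefore returned, together with final_length + tune = 1100 for slicing
# the resulting MultiTrace.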
def stop_tuning(step):
"""Stop tuning the current step method."""
step.stop_tuning()
return step
class _DefaultTrace:
"""
Utility for collecting samples into a dictionary.
Name comes from its similarity to ``defaultdict``:
entries are lazily created.
Parameters
----------
samples : int
The number of samples that will be collected, per variable,
into the trace.
Attributes
----------
trace_dict : Dict[str, np.ndarray]
A dictionary constituting a trace. Should be extracted
after a procedure has filled the `_DefaultTrace` using the
`insert()` method
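    Examples
    --------
    A minimal sketch of the intended usage::
        strace = _DefaultTrace(samples=100)
        for i in range(100):
            strace.insert("x", np.random.randn(3), i)
        strace.trace_dict["x"].shape  # (100, 3)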
"""
trace_dict = {} # type: Dict[str, np.ndarray]
_len = None # type: int
def __init__(self, samples: int):
self._len = samples
self.trace_dict = {}
def insert(self, k: str, v, idx: int):
"""
Insert `v` as the value of the `idx`th sample for the variable `k`.
Parameters
----------
k: str
Name of the variable.
v: anything that can go into a numpy array (including a numpy array)
The value of the `idx`th sample from variable `k`
        idx: int
The index of the sample we are inserting into the trace.
"""
if hasattr(v, "shape"):
value_shape = tuple(v.shape) # type: Tuple[int, ...]
else:
value_shape = ()
# initialize if necessary
if k not in self.trace_dict:
array_shape = (self._len,) + value_shape
self.trace_dict[k] = np.empty(array_shape, dtype=np.array(v).dtype)
# do the actual insertion
if value_shape == ():
self.trace_dict[k][idx] = v
else:
self.trace_dict[k][idx, :] = v
def sample_posterior_predictive(
trace,
samples: Optional[int] = None,
model: Optional[Model] = None,
vars: Optional[TIterable[Tensor]] = None,
var_names: Optional[List[str]] = None,
size: Optional[int] = None,
keep_size: Optional[bool] = False,
random_seed=None,
progressbar: bool = True,
) -> Dict[str, np.ndarray]:
"""Generate posterior predictive samples from a model given a trace.
Parameters
----------
trace : backend, list, xarray.Dataset, arviz.InferenceData, or MultiTrace
Trace generated from MCMC sampling, or a list of dicts (eg. points or from find_MAP()),
or xarray.Dataset (eg. InferenceData.posterior or InferenceData.prior)
samples : int
Number of posterior predictive samples to generate. Defaults to one posterior predictive
sample per posterior sample, that is, the number of draws times the number of chains. It
is not recommended to modify this value; when modified, some chains may not be represented
in the posterior predictive sample.
model : Model (optional if in ``with`` context)
Model used to generate ``trace``
vars : iterable
Variables for which to compute the posterior predictive samples.
Deprecated: please use ``var_names`` instead.
var_names : Iterable[str]
Names of variables for which to compute the posterior predictive samples.
size : int
The number of random draws from the distribution specified by the parameters in each
sample of the trace. Not recommended unless more than ndraws times nchains posterior
predictive samples are needed.
keep_size : bool, optional
Force posterior predictive sample to have the same shape as posterior and sample stats
data: ``(nchains, ndraws, ...)``. Overrides samples and size parameters.
random_seed : int
Seed for the random number generator.
progressbar : bool
Whether or not to display a progress bar in the command line. The bar shows the percentage
of completion, the sampling speed in samples per second (SPS), and the estimated remaining
time until completion ("expected time of arrival"; ETA).
Returns
-------
samples : dict
Dictionary with the variable names as keys, and values numpy arrays containing
posterior predictive samples.
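    Examples
    --------
    A minimal sketch of typical usage; the model and data names below are illustrative
    and not part of this module::
        with pm.Model() as model:
            mu = pm.Normal("mu", 0.0, 1.0)
            obs = pm.Normal("obs", mu=mu, sigma=1.0, observed=observed_data)
            trace = pm.sample(1000, tune=1000)
            ppc = pm.sample_posterior_predictive(trace)
        ppc["obs"].shape  # (nchains * ndraws,) + observed_data.shape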
"""
_trace: Union[MultiTrace, PointList]
if isinstance(trace, InferenceData):
_trace = dataset_to_point_dict(trace.posterior)
elif isinstance(trace, xarray.Dataset):
_trace = dataset_to_point_dict(trace)
else:
_trace = trace
nchain: int
len_trace: int
if isinstance(trace, (InferenceData, xarray.Dataset)):
nchain, len_trace = chains_and_samples(trace)
else:
len_trace = len(_trace)
try:
nchain = _trace.nchains
except AttributeError:
nchain = 1
if keep_size and samples is not None:
raise IncorrectArgumentsError("Should not specify both keep_size and samples arguments")
if keep_size and size is not None:
raise IncorrectArgumentsError("Should not specify both keep_size and size arguments")
if samples is None:
if isinstance(_trace, MultiTrace):
samples = sum(len(v) for v in _trace._straces.values())
elif isinstance(_trace, list) and all((isinstance(x, dict) for x in _trace)):
# this is a list of points
samples = len(_trace)
else:
raise TypeError(
"Do not know how to compute number of samples for trace argument of type %s"
% type(_trace)
)
assert samples is not None
if samples < len_trace * nchain:
warnings.warn(
"samples parameter is smaller than nchains times ndraws, some draws "
"and/or chains may not be represented in the returned posterior "
"predictive sample"
)
model = modelcontext(model)
if var_names is not None:
if vars is not None:
raise IncorrectArgumentsError("Should not specify both vars and var_names arguments.")
else:
vars = [model[x] for x in var_names]
elif vars is not None: # var_names is None, and vars is not.
warnings.warn("vars argument is deprecated in favor of var_names.", DeprecationWarning)
if vars is None:
vars = model.observed_RVs
if random_seed is not None:
np.random.seed(random_seed)
indices = np.arange(samples)
if progressbar:
indices = progress_bar(indices, total=samples, display=progressbar)
ppc_trace_t = _DefaultTrace(samples)
try:
for idx in indices:
if nchain > 1:
# the trace object will either be a MultiTrace (and have _straces)...
if hasattr(_trace, "_straces"):
chain_idx, point_idx = np.divmod(idx, len_trace)
param = cast(MultiTrace, _trace)._straces[chain_idx % nchain].point(point_idx)
# ... or a PointList
else:
param = cast(PointList, _trace)[idx % len_trace]
# there's only a single chain, but the index might hit it multiple times if
# the number of indices is greater than the length of the trace.
else:
param = _trace[idx % len_trace]
values = draw_values(vars, point=param, size=size)
for k, v in zip(vars, values):
ppc_trace_t.insert(k.name, v, idx)
except KeyboardInterrupt:
pass
ppc_trace = ppc_trace_t.trace_dict
if keep_size:
for k, ary in ppc_trace.items():
ppc_trace[k] = ary.reshape((nchain, len_trace, *ary.shape[1:]))
return ppc_trace
def sample_posterior_predictive_w(
traces,
samples: Optional[int] = None,
models: Optional[List[Model]] = None,
weights: Optional[ArrayLike] = None,
random_seed: Optional[int] = None,
progressbar: bool = True,
):
"""Generate weighted posterior predictive samples from a list of models and
a list of traces according to a set of weights.
Parameters
----------
traces : list or list of lists
List of traces generated from MCMC sampling (xarray.Dataset, arviz.InferenceData, or
MultiTrace), or a list of list containing dicts from find_MAP() or points. The number of
traces should be equal to the number of weights.
samples : int, optional
Number of posterior predictive samples to generate. Defaults to the
length of the shorter trace in traces.
models : list of Model
List of models used to generate the list of traces. The number of models should be equal to
the number of weights and the number of observed RVs should be the same for all models.
By default a single model will be inferred from ``with`` context, in this case results will
only be meaningful if all models share the same distributions for the observed RVs.
weights : array-like, optional
Individual weights for each trace. Default, same weight for each model.
random_seed : int, optional
Seed for the random number generator.
progressbar : bool, optional default True
Whether or not to display a progress bar in the command line. The bar shows the percentage
of completion, the sampling speed in samples per second (SPS), and the estimated remaining
time until completion ("expected time of arrival"; ETA).
Returns
-------
samples : dict
Dictionary with the variables as keys. The values corresponding to the
posterior predictive samples from the weighted models.
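    Examples
    --------
    A minimal sketch, assuming ``trace_a``/``trace_b`` were sampled from ``model_a``/``model_b``
    (these names are illustrative)::
        ppc_w = pm.sample_posterior_predictive_w(
            traces=[trace_a, trace_b],
            models=[model_a, model_b],
            weights=[0.7, 0.3],
        )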
"""
np.random.seed(random_seed)
if isinstance(traces[0], InferenceData):
n_samples = [
trace.posterior.sizes["chain"] * trace.posterior.sizes["draw"] for trace in traces
]
traces = [dataset_to_point_dict(trace.posterior) for trace in traces]
elif isinstance(traces[0], xarray.Dataset):
n_samples = [trace.sizes["chain"] * trace.sizes["draw"] for trace in traces]
traces = [dataset_to_point_dict(trace) for trace in traces]
else:
n_samples = [len(i) * i.nchains for i in traces]
if models is None:
models = [modelcontext(models)] * len(traces)
if weights is None:
weights = [1] * len(traces)
if len(traces) != len(weights):
raise ValueError("The number of traces and weights should be the same")
if len(models) != len(weights):
raise ValueError("The number of models and weights should be the same")
length_morv = len(models[0].observed_RVs)
if not all(len(i.observed_RVs) == length_morv for i in models):
raise ValueError("The number of observed RVs should be the same for all models")
weights = np.asarray(weights)
p = weights / np.sum(weights)
min_tr = min(n_samples)
n = (min_tr * p).astype("int")
# ensure n sum up to min_tr
idx = np.argmax(n)
n[idx] = n[idx] + min_tr - np.sum(n)
trace = []
for i, j in enumerate(n):
tr = traces[i]
len_trace = len(tr)
try:
nchain = tr.nchains
except AttributeError:
nchain = 1
indices = np.random.randint(0, nchain * len_trace, j)
if nchain > 1:
chain_idx, point_idx = np.divmod(indices, len_trace)
for idx in zip(chain_idx, point_idx):
trace.append(tr._straces[idx[0]].point(idx[1]))
else:
for idx in indices:
trace.append(tr[idx])
obs = [x for m in models for x in m.observed_RVs]
variables = np.repeat(obs, n)
lengths = list(set([np.atleast_1d(observed).shape for observed in obs]))
if len(lengths) == 1:
size = [None for i in variables]
elif len(lengths) > 2:
raise ValueError("Observed variables could not be broadcast together")
else:
size = []
x = np.zeros(shape=lengths[0])
y = np.zeros(shape=lengths[1])
b = np.broadcast(x, y)
for var in variables:
shape = np.shape(np.atleast_1d(var.distribution.default()))
if shape != b.shape:
size.append(b.shape)
else:
size.append(None)
len_trace = len(trace)
if samples is None:
samples = len_trace
indices = np.random.randint(0, len_trace, samples)
if progressbar:
indices = progress_bar(indices, total=samples, display=progressbar)
try:
ppc = defaultdict(list)
for idx in indices:
param = trace[idx]
var = variables[idx]
            # TODO: sample_posterior_predictive_w currently only works for models with
            # one observed RV.
ppc[var.name].append(draw_values([var], point=param, size=size[idx])[0])
except KeyboardInterrupt:
pass
return {k: np.asarray(v) for k, v in ppc.items()}
def sample_prior_predictive(
samples=500,
model: Optional[Model] = None,
vars: Optional[TIterable[str]] = None,
var_names: Optional[TIterable[str]] = None,
random_seed=None,
) -> Dict[str, np.ndarray]:
"""Generate samples from the prior predictive distribution.
Parameters
----------
samples : int
Number of samples from the prior predictive to generate. Defaults to 500.
model : Model (optional if in ``with`` context)
vars : Iterable[str]
A list of names of variables for which to compute the posterior predictive
samples. *DEPRECATED* - Use ``var_names`` argument instead.
var_names : Iterable[str]
A list of names of variables for which to compute the posterior predictive
samples. Defaults to both observed and unobserved RVs.
random_seed : int
Seed for the random number generator.
Returns
-------
dict
Dictionary with variable names as keys. The values are numpy arrays of prior
samples.
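    Examples
    --------
    A minimal sketch; the model and data names below are illustrative::
        with pm.Model() as model:
            mu = pm.Normal("mu", 0.0, 1.0)
            obs = pm.Normal("obs", mu=mu, sigma=1.0, observed=observed_data)
            prior = pm.sample_prior_predictive(samples=200)
        sorted(prior.keys())  # includes "mu" and "obs"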
"""
model = modelcontext(model)
if vars is None and var_names is None:
prior_pred_vars = model.observed_RVs
prior_vars = (
get_default_varnames(model.unobserved_RVs, include_transformed=True) + model.potentials
)
vars_ = [var.name for var in prior_vars + prior_pred_vars]
vars = set(vars_)
elif vars is None:
vars = var_names
vars_ = vars
elif vars is not None:
warnings.warn("vars argument is deprecated in favor of var_names.", DeprecationWarning)
vars_ = vars
else:
raise ValueError("Cannot supply both vars and var_names arguments.")
vars = cast(TIterable[str], vars) # tell mypy that vars cannot be None here.
if random_seed is not None:
np.random.seed(random_seed)
names = get_default_varnames(vars_, include_transformed=False)
# draw_values fails with auto-transformed variables. transform them later!
values = draw_values([model[name] for name in names], size=samples)
data = {k: v for k, v in zip(names, values)}
if data is None:
raise AssertionError("No variables sampled: attempting to sample %s" % names)
prior = {} # type: Dict[str, np.ndarray]
for var_name in vars:
if var_name in data:
prior[var_name] = data[var_name]
elif is_transformed_name(var_name):
untransformed = get_untransformed_name(var_name)
if untransformed in data:
prior[var_name] = model[untransformed].transformation.forward_val(
data[untransformed]
)
return prior
def init_nuts(
init="auto", chains=1, n_init=500000, model=None, random_seed=None, progressbar=True, **kwargs,
):
"""Set up the mass matrix initialization for NUTS.
NUTS convergence and sampling speed is extremely dependent on the
choice of mass/scaling matrix. This function implements different
methods for choosing or adapting the mass matrix.
Parameters
----------
init : str
Initialization method to use.
* auto: Choose a default initialization method automatically.
Currently, this is `'jitter+adapt_diag'`, but this can change in the future. If you
depend on the exact behaviour, choose an initialization method explicitly.
        * adapt_diag: Start with an identity mass matrix and then adapt a diagonal based on the
variance of the tuning samples. All chains use the test value (usually the prior mean)
as starting point.
* jitter+adapt_diag: Same as ``adapt_diag``, but use test value plus a uniform jitter in
[-1, 1] as starting point in each chain.
* advi+adapt_diag: Run ADVI and then adapt the resulting diagonal mass matrix based on the
sample variance of the tuning samples.
* advi+adapt_diag_grad: Run ADVI and then adapt the resulting diagonal mass matrix based
on the variance of the gradients during tuning. This is **experimental** and might be
removed in a future release.
* advi: Run ADVI to estimate posterior mean and diagonal mass matrix.
* advi_map: Initialize ADVI with MAP and use MAP as starting point.
* map: Use the MAP as starting point. This is discouraged.
* adapt_full: Adapt a dense mass matrix using the sample covariances. All chains use the
test value (usually the prior mean) as starting point.
        * jitter+adapt_full: Same as ``adapt_full``, but use test value plus a uniform jitter in
[-1, 1] as starting point in each chain.
chains : int
Number of jobs to start.
n_init : int
Number of iterations of initializer. Only works for 'ADVI' init methods.
model : Model (optional if in ``with`` context)
progressbar : bool
Whether or not to display a progressbar for advi sampling.
**kwargs : keyword arguments
Extra keyword arguments are forwarded to pymc3.NUTS.
Returns
-------
start : ``pymc3.model.Point``
Starting point for sampler
nuts_sampler : ``pymc3.step_methods.NUTS``
Instantiated and initialized NUTS sampler object
"""
model = modelcontext(model)
vars = kwargs.get("vars", model.vars)
if set(vars) != set(model.vars):
raise ValueError("Must use init_nuts on all variables of a model.")
if not all_continuous(vars):
raise ValueError("init_nuts can only be used for models with only " "continuous variables.")
if not isinstance(init, str):
raise TypeError("init must be a string.")
if init is not None:
init = init.lower()
if init == "auto":
init = "jitter+adapt_diag"
_log.info("Initializing NUTS using {}...".format(init))
if random_seed is not None:
random_seed = int(np.atleast_1d(random_seed)[0])
np.random.seed(random_seed)
cb = [
pm.callbacks.CheckParametersConvergence(tolerance=1e-2, diff="absolute"),
pm.callbacks.CheckParametersConvergence(tolerance=1e-2, diff="relative"),
]
if init == "adapt_diag":
start = [model.test_point] * chains
mean = np.mean([model.dict_to_array(vals) for vals in start], axis=0)
var = np.ones_like(mean)
potential = quadpotential.QuadPotentialDiagAdapt(model.ndim, mean, var, 10)
elif init == "jitter+adapt_diag":
start = []
for _ in range(chains):
mean = {var: val.copy() for var, val in model.test_point.items()}
for val in mean.values():
val[...] += 2 * np.random.rand(*val.shape) - 1
start.append(mean)
mean = np.mean([model.dict_to_array(vals) for vals in start], axis=0)
var = np.ones_like(mean)
potential = quadpotential.QuadPotentialDiagAdapt(model.ndim, mean, var, 10)
elif init == "advi+adapt_diag_grad":
approx = pm.fit(
random_seed=random_seed,
n=n_init,
method="advi",
model=model,
callbacks=cb,
progressbar=progressbar,
obj_optimizer=pm.adagrad_window,
) # type: pm.MeanField
start = approx.sample(draws=chains)
start = list(start)
stds = approx.bij.rmap(approx.std.eval())
cov = model.dict_to_array(stds) ** 2
mean = approx.bij.rmap(approx.mean.get_value())
mean = model.dict_to_array(mean)
weight = 50
potential = quadpotential.QuadPotentialDiagAdaptGrad(model.ndim, mean, cov, weight)
elif init == "advi+adapt_diag":
approx = pm.fit(
random_seed=random_seed,
n=n_init,
method="advi",
model=model,
callbacks=cb,
progressbar=progressbar,
obj_optimizer=pm.adagrad_window,
) # type: pm.MeanField
start = approx.sample(draws=chains)
start = list(start)
stds = approx.bij.rmap(approx.std.eval())
cov = model.dict_to_array(stds) ** 2
mean = approx.bij.rmap(approx.mean.get_value())
mean = model.dict_to_array(mean)
weight = 50
potential = quadpotential.QuadPotentialDiagAdapt(model.ndim, mean, cov, weight)
elif init == "advi":
approx = pm.fit(
random_seed=random_seed,
n=n_init,
method="advi",
model=model,
callbacks=cb,
progressbar=progressbar,
obj_optimizer=pm.adagrad_window,
) # type: pm.MeanField
start = approx.sample(draws=chains)
start = list(start)
stds = approx.bij.rmap(approx.std.eval())
cov = model.dict_to_array(stds) ** 2
potential = quadpotential.QuadPotentialDiag(cov)
elif init == "advi_map":
start = pm.find_MAP(include_transformed=True)
approx = pm.MeanField(model=model, start=start)
pm.fit(
random_seed=random_seed,
n=n_init,
method=pm.KLqp(approx),
callbacks=cb,
progressbar=progressbar,
obj_optimizer=pm.adagrad_window,
)
start = approx.sample(draws=chains)
start = list(start)
stds = approx.bij.rmap(approx.std.eval())
cov = model.dict_to_array(stds) ** 2
potential = quadpotential.QuadPotentialDiag(cov)
elif init == "map":
start = pm.find_MAP(include_transformed=True)
cov = pm.find_hessian(point=start)
start = [start] * chains
potential = quadpotential.QuadPotentialFull(cov)
elif init == "adapt_full":
start = [model.test_point] * chains
mean = np.mean([model.dict_to_array(vals) for vals in start], axis=0)
cov = np.eye(model.ndim)
potential = quadpotential.QuadPotentialFullAdapt(model.ndim, mean, cov, 10)
elif init == "jitter+adapt_full":
start = []
for _ in range(chains):
mean = {var: val.copy() for var, val in model.test_point.items()}
for val in mean.values():
val[...] += 2 * np.random.rand(*val.shape) - 1
start.append(mean)
mean = np.mean([model.dict_to_array(vals) for vals in start], axis=0)
cov = np.eye(model.ndim)
potential = quadpotential.QuadPotentialFullAdapt(model.ndim, mean, cov, 10)
else:
raise ValueError("Unknown initializer: {}.".format(init))
step = pm.NUTS(potential=potential, model=model, **kwargs)
return start, step
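# A minimal usage sketch for ``init_nuts`` (it is normally called internally by
# ``pm.sample`` when a model with only continuous variables is sampled); the model
# below is illustrative only:
#     with pm.Model() as model:
#         x = pm.Normal("x", 0.0, 1.0)
#         start, step = init_nuts(init="jitter+adapt_diag", chains=2, model=model)
#         trace = pm.sample(500, tune=500, step=step, start=start, chains=2)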
| 36.804867
| 134
| 0.622921
|
4a061e32da2df005310843b42da0ecb2fed7b711
| 6,146
|
py
|
Python
|
tests/unit/bokeh/server/test_util.py
|
Suicoleiro/bokeh
|
a212acdf091a7a4df639fa9d443be6ade0018039
|
[
"BSD-3-Clause"
] | 15,193
|
2015-01-01T05:11:45.000Z
|
2022-03-31T19:30:20.000Z
|
tests/unit/bokeh/server/test_util.py
|
Suicoleiro/bokeh
|
a212acdf091a7a4df639fa9d443be6ade0018039
|
[
"BSD-3-Clause"
] | 9,554
|
2015-01-01T03:16:54.000Z
|
2022-03-31T22:59:39.000Z
|
tests/unit/bokeh/server/test_util.py
|
Suicoleiro/bokeh
|
a212acdf091a7a4df639fa9d443be6ade0018039
|
[
"BSD-3-Clause"
] | 4,829
|
2015-01-02T03:35:32.000Z
|
2022-03-30T16:40:26.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import socket
# Module under test
import bokeh.server.util as util # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_bind_sockets_with_zero_port() -> None:
ss, port = util.bind_sockets("127.0.0.1", 0)
assert isinstance(ss, list)
assert len(ss) == 1
assert isinstance(ss[0], socket.socket)
assert isinstance(port, int)
def test_check_allowlist_rejects_port_mismatch() -> None:
assert False == util.check_allowlist("foo:100", ["foo:101", "foo:102"])
def test_check_allowlist_rejects_name_mismatch() -> None:
assert False == util.check_allowlist("foo:100", ["bar:100", "baz:100"])
def test_check_allowlist_accepts_name_port_match() -> None:
assert True == util.check_allowlist("foo:100", ["foo:100", "baz:100"])
def test_check_allowlist_accepts_implicit_port_80() -> None:
assert True == util.check_allowlist("foo", ["foo:80"])
def test_check_allowlist_accepts_all_on_star() -> None:
assert True == util.check_allowlist("192.168.0.1", ['*'])
assert True == util.check_allowlist("192.168.0.1:80", ['*'])
assert True == util.check_allowlist("192.168.0.1:5006", ['*'])
assert True == util.check_allowlist("192.168.0.1:80", ['*:80'])
assert False == util.check_allowlist("192.168.0.1:80", ['*:81'])
assert True == util.check_allowlist("192.168.0.1:5006", ['*:*'])
assert True == util.check_allowlist("192.168.0.1", ['192.168.0.*'])
assert True == util.check_allowlist("192.168.0.1:5006", ['192.168.0.*'])
assert False == util.check_allowlist("192.168.1.1", ['192.168.0.*'])
assert True == util.check_allowlist("foobarbaz", ['*'])
assert True == util.check_allowlist("192.168.0.1", ['192.168.0.*'])
assert False == util.check_allowlist("192.168.1.1", ['192.168.0.*'])
assert False == util.check_allowlist("192.168.0.1", ['192.168.0.*:5006'])
assert True == util.check_allowlist("192.168.0.1", ['192.168.0.*:80'])
assert True == util.check_allowlist("foobarbaz", ['*'])
assert True == util.check_allowlist("foobarbaz", ['*:*'])
assert True == util.check_allowlist("foobarbaz", ['*:80'])
assert False == util.check_allowlist("foobarbaz", ['*:5006'])
assert True == util.check_allowlist("foobarbaz:5006", ['*'])
assert True == util.check_allowlist("foobarbaz:5006", ['*:*'])
assert True == util.check_allowlist("foobarbaz:5006", ['*:5006'])
def test_create_hosts_allowlist_no_host() -> None:
hosts = util.create_hosts_allowlist(None, 1000)
assert hosts == ["localhost:1000"]
hosts = util.create_hosts_allowlist([], 1000)
assert hosts == ["localhost:1000"]
def test_create_hosts_allowlist_host_value_with_port_use_port() -> None:
hosts = util.create_hosts_allowlist(["foo:1000"], 1000)
assert hosts == ["foo:1000"]
hosts = util.create_hosts_allowlist(["foo:1000","bar:2100"], 1000)
assert hosts == ["foo:1000","bar:2100"]
def test_create_hosts_allowlist_host_without_port_use_port_80() -> None:
hosts = util.create_hosts_allowlist(["foo"], 1000)
assert hosts == ["foo:80"]
hosts = util.create_hosts_allowlist(["foo","bar"], 1000)
assert hosts == ["foo:80","bar:80"]
def test_create_hosts_allowlist_host_non_int_port_raises() -> None:
with pytest.raises(ValueError):
util.create_hosts_allowlist(["foo:xyz"], 1000)
def test_create_hosts_allowlist_bad_host_raises() -> None:
with pytest.raises(ValueError):
util.create_hosts_allowlist([""], 1000)
with pytest.raises(ValueError):
util.create_hosts_allowlist(["a:b:c"], 1000)
with pytest.raises(ValueError):
util.create_hosts_allowlist([":80"], 1000)
def test_match_host() -> None:
assert util.match_host('192.168.0.1:80', '192.168.0.1:80') == True
assert util.match_host('192.168.0.1:80', '192.168.0.1') == True
assert util.match_host('192.168.0.1:80', '192.168.0.1:8080') == False
assert util.match_host('192.168.0.1', '192.168.0.2') == False
assert util.match_host('192.168.0.1', '192.168.*.*') == True
assert util.match_host('alice', 'alice') == True
assert util.match_host('alice:80', 'alice') == True
assert util.match_host('alice', 'bob') == False
assert util.match_host('foo.example.com', 'foo.example.com.net') == False
assert util.match_host('alice', '*') == True
assert util.match_host('alice', '*:*') == True
assert util.match_host('alice:80', '*') == True
assert util.match_host('alice:80', '*:80') == True
assert util.match_host('alice:8080', '*:80') == False
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 44.861314
| 81
| 0.52587
|
4a061e874cefe709928978e63644b9bea854685c
| 521
|
py
|
Python
|
examples/alias_startup.py
|
rpavani1998/cmd2
|
77d9015986bca909aae9181e2d72d0d835aeaa09
|
[
"MIT"
] | null | null | null |
examples/alias_startup.py
|
rpavani1998/cmd2
|
77d9015986bca909aae9181e2d72d0d835aeaa09
|
[
"MIT"
] | null | null | null |
examples/alias_startup.py
|
rpavani1998/cmd2
|
77d9015986bca909aae9181e2d72d0d835aeaa09
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
"""A simple example demonstrating the following:
1) How to add custom command aliases using the alias command
2) How to load an initialization script at startup
"""
import cmd2
class AliasAndStartup(cmd2.Cmd):
""" Example cmd2 application where we create commands that just print the arguments they are called with."""
def __init__(self):
super().__init__(startup_script='.cmd2rc')
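    # The commands in the startup script '.cmd2rc' are run when the application starts.
    # As an illustration only (the exact alias syntax can differ between cmd2 versions),
    # such a file might contain an alias definition line like:
    #     alias create ls !dir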
if __name__ == '__main__':
app = AliasAndStartup()
app.cmdloop()
| 26.05
| 112
| 0.710173
|
4a061f7139033bbae2e12a8c1a7e9c889dcc0bd8
| 7,195
|
py
|
Python
|
tracpro/polls/tests/test_views.py
|
rapidpro/tracpro
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
[
"BSD-3-Clause"
] | 5
|
2015-07-21T15:58:31.000Z
|
2019-09-14T22:34:00.000Z
|
tracpro/polls/tests/test_views.py
|
rapidpro/tracpro
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
[
"BSD-3-Clause"
] | 197
|
2015-03-24T15:26:04.000Z
|
2017-11-28T19:24:37.000Z
|
tracpro/polls/tests/test_views.py
|
rapidpro/tracpro
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
[
"BSD-3-Clause"
] | 10
|
2015-03-24T12:26:36.000Z
|
2017-02-21T13:08:57.000Z
|
# coding=utf-8
from __future__ import absolute_import, unicode_literals
import csv
import datetime
from StringIO import StringIO
import pytz
from django.core.urlresolvers import reverse
from tracpro.test.cases import TracProDataTest
from ..models import Response
from . import factories
class PollCRUDLTest(TracProDataTest):
def test_list(self):
url = reverse('polls.poll_list')
# log in as admin
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 1)
class ResponseCRUDLTest(TracProDataTest):
def setUp(self):
super(ResponseCRUDLTest, self).setUp()
date0 = datetime.datetime(2014, 1, 1, 6, 59, 59, tzinfo=pytz.UTC) # Slightly earlier than date1
date1 = datetime.datetime(2014, 1, 1, 7, tzinfo=pytz.UTC)
date2 = datetime.datetime(2014, 1, 1, 8, tzinfo=pytz.UTC)
date3 = datetime.datetime(2014, 1, 2, 7, tzinfo=pytz.UTC)
self.mock_temba_client.create_flow_start.return_value = []
# create non-regional pollrun with 4 responses (2 complete, 1 partial, 1 empty)
self.pollrun1 = factories.UniversalPollRun(
poll=self.poll1, conducted_on=date1)
pollrun1_r0 = factories.Response( # Contact's first response
pollrun=self.pollrun1, contact=self.contact1,
created_on=date0, updated_on=date0,
status=Response.STATUS_COMPLETE,
is_active=False, # not active because it gets superseded
)
# This answer is different than the next time they answer it
factories.Answer(
response=pollrun1_r0, question=self.poll1_question1,
value="2.0000", category="1 - 10", submitted_on=date0)
factories.Answer(
response=pollrun1_r0, question=self.poll1_question2,
value="Cloudy", category="All Responses", submitted_on=date0)
# Same contact's second response to same poll, slightly later
self.pollrun1_r1 = factories.Response( # Supersedes the one on date0, for most purposes
pollrun=self.pollrun1, contact=self.contact1,
created_on=date1, updated_on=date1,
status=Response.STATUS_COMPLETE)
factories.Answer(
response=self.pollrun1_r1, question=self.poll1_question1,
value="5.0000", category="1 - 10", submitted_on=date1)
factories.Answer(
response=self.pollrun1_r1, question=self.poll1_question2,
value="Sunny", category="All Responses", submitted_on=date1)
# Another contact's response
self.pollrun1_r2 = factories.Response(
pollrun=self.pollrun1, contact=self.contact2,
created_on=date2, updated_on=date2,
status=Response.STATUS_PARTIAL)
factories.Answer(
response=self.pollrun1_r2, question=self.poll1_question1,
value="6.0000", category="1 - 10", submitted_on=date2)
# Another contact's response
self.pollrun1_r3 = factories.Response(
pollrun=self.pollrun1, contact=self.contact4,
created_on=date3, updated_on=date3,
status=Response.STATUS_EMPTY)
# create regional pollrun with 1 incomplete response
self.pollrun2 = factories.RegionalPollRun(
poll=self.poll1, region=self.region1, conducted_on=date3)
self.pollrun2_r1 = factories.Response(
pollrun=self.pollrun2, contact=self.contact1,
created_on=date3, updated_on=date3,
status=Response.STATUS_PARTIAL)
def test_by_pollrun(self):
url = reverse('polls.response_by_pollrun', args=[self.pollrun1.pk])
# log in as admin
self.login(self.admin)
# view responses for pollrun #1
response = self.url_get('unicef', url)
self.assertContains(response, "Number of sheep", status_code=200)
self.assertContains(response, "How is the weather?")
responses = list(response.context['object_list'])
self.assertEqual(len(responses), 2)
# newest non-empty first
self.assertEqual(responses, [self.pollrun1_r2, self.pollrun1_r1])
# can't restart from "All Regions" view of responses
self.assertFalse(response.context['can_restart'])
self.switch_region(self.region1)
# can't restart as there is a later pollrun of the same poll in region #1
response = self.url_get('unicef', url)
self.assertFalse(response.context['can_restart'])
self.switch_region(self.region2)
# can restart as this is the latest pollrun of this poll in region #2
response = self.url_get('unicef', url)
self.assertTrue(response.context['can_restart'])
def test_by_contact(self):
# log in as admin
self.login(self.admin)
# view responses for contact #1
url = reverse('polls.response_by_contact', args=[self.contact1.pk])
response = self.url_get('unicef', url)
responses = list(response.context['object_list'])
self.assertEqual(len(responses), 2)
# newest non-empty first
self.assertEqual(responses, [self.pollrun2_r1, self.pollrun1_r1])
def test_fetching_pollruns_csv(self):
# log in as admin
self.login(self.admin)
url = reverse('polls.response_by_pollrun', args=[self.pollrun1.pk]) + "?_format=csv"
response = self.url_get('unicef', url)
self.assertEqual(200, response.status_code)
self.assertEqual('text/csv', response['Content-Type'])
rows = [[element.decode('utf-8') for element in row]
for row in csv.reader(StringIO(response.content.decode('utf-8')))]
self.assertEqual(rows[0], ['Date', 'Name', 'URN', 'Panel', 'Cohorts', 'Number of sheep', 'How is the weather?'])
self.assertEqual(rows[1], [
'Jan 01, 2014 12:30',
'Bob',
'tel:2345',
'Kandahar',
'Farmers, Kandahar',
'6.0000',
'',
])
self.assertEqual(rows[2], [
'Jan 01, 2014 11:30',
'Ann',
'tel:1234',
'Kandahar',
'Farmers, Teachers',
'5.0000',
'Sunny',
])
self.assertEqual(rows[3], [
'Jan 01, 2014 11:29',
'Ann',
'tel:1234',
'Kandahar',
'Farmers, Teachers',
'2.0000',
'Cloudy',
])
self.assertEqual(4, len(rows))
| 40.195531
| 120
| 0.576233
|
4a061f9322b9e372c4baf60148a42dc6736985f0
| 6,066
|
py
|
Python
|
kart/__init__.py
|
giacomocaironi/Kart
|
29db924c69e679a19c508a5e41cc3ef3689e1a71
|
[
"MIT"
] | 3
|
2020-03-24T17:02:20.000Z
|
2021-07-21T11:01:06.000Z
|
kart/__init__.py
|
giacomocaironi/Kart
|
29db924c69e679a19c508a5e41cc3ef3689e1a71
|
[
"MIT"
] | 2
|
2020-09-13T16:18:29.000Z
|
2021-03-21T14:43:00.000Z
|
kart/__init__.py
|
giacomocaironi/Kart
|
29db924c69e679a19c508a5e41cc3ef3689e1a71
|
[
"MIT"
] | null | null | null |
import argparse
import fnmatch
import shutil
import threading
import traceback
from copy import deepcopy
from http.server import HTTPServer
from pathlib import Path
from kart.utils import KartMap, KartObserver, KartRequestHandler, merge_dicts
class Kart:
"""Main Kart class"""
def __init__(
self,
miners: list = [],
content_modifiers: list = [],
mappers: list = [],
map_modifiers: list = [],
renderers: list = [],
config: dict = {},
build_location: str = "_site",
):
self.miners = miners
self.content_modifiers = content_modifiers
self.mappers = mappers
self.map_modifiers = map_modifiers
self.renderers = renderers
self.config = config
self.build_location = Path(build_location)
self.lock = threading.Lock()
def check_config(self):
"""Checks if the config has all the necessary fields and sets them to default values if not"""
default = {
"name": "Example",
"site_url": "https://example.org",
"pagination": {"per_page": 5, "skip": 0},
"timezone": "UTC",
"serving": False,
}
merge_dicts(self.config, default)
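        # Illustrative note (assumes ``merge_dicts`` recursively fills in missing keys
        # without overwriting user-supplied values): a config such as
        #     {"name": "My Site", "pagination": {"per_page": 10}}
        # would keep ``per_page == 10`` while picking up ``skip == 0`` and the default
        # ``site_url`` from the defaults above.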
def mine_data(self, start: bool = True):
"""Calls miners and content modifiers"""
self.site = {}
for miner in self.miners:
if start:
miner.read_data(self.config)
self.site.update(miner.collect(self.config))
for modifier in self.content_modifiers:
modifier.modify(self.config, self.site)
def create_map(self):
"""Calls mappers and map modifiers"""
self.map = KartMap(site_url=self.config["site_url"])
for mapper in self.mappers:
self.map.update(mapper.map(self.config, self.site))
for modifier in self.map_modifiers:
modifier.modify(self.config, self.site, self.map)
def write(self):
"""Calls renderers"""
for renderer in self.renderers:
renderer.render(self.config, self.site, self.map, self.build_location)
def build(self):
"""Build the entire site"""
self.check_config()
self.mine_data()
self.create_map()
shutil.rmtree(self.build_location, ignore_errors=True)
self.build_location.mkdir(parents=True, exist_ok=True)
self.write()
# _site and _map are set and retrieved with a threading lock to prevent data races
# _site and _map are set only when the creation of the map has finished
# therefore it is not possible to access only partial data,
# preventing errors when serving the site during development
def update_data(self):
"""Update the site data after a file has been changed"""
self.mine_data(False)
self.create_map()
_site = deepcopy(self.site)
_map = deepcopy(self.map)
_urls = {}
_regexes = {}
for slug, page in self.map.items():
_urls[page["url"]] = slug
if "*" in page["url"] or "?" in page["url"]:
_regexes[page["url"]] = slug
with self.lock:
self._site = _site
self._map = _map
self._urls = _urls
self._regexes = _regexes
def serve_page(self, handler, url: str):
"""Serve a single page"""
with self.lock:
site_map = self._map
urls = self._urls
site = self._site
regexes = self._regexes
if url in urls:
page = site_map[urls[url]]
else:
try:
pattern = next((x for x in regexes if fnmatch.fnmatch(url, x)))
page = site_map[regexes[pattern]]
except StopIteration:
page = None
if page:
renderer = self.renderer_dict[page["renderer"]]
renderer.serve(handler, page, self.config, site, site_map)
def serve(self, port: int = 9000):
"""Main loop for serving the site"""
self.check_config()
self.renderer_dict = {}
observer = KartObserver(action=self.update_data)
for miner in self.miners:
miner.start_watching(self.config, observer)
observer.start()
for renderer in self.renderers:
self.renderer_dict[renderer.name] = renderer
renderer.start_serving(self.config)
handler_class = KartRequestHandler
handler_class.action = self.serve_page
httpd = HTTPServer(("", port), handler_class)
self.update_data()
shutil.rmtree(self.build_location, ignore_errors=True)
while True:
try:
httpd.handle_request()
except KeyboardInterrupt:
break
except Exception:
print(traceback.format_exc())
print("\rexiting")
for miner in self.miners:
miner.stop_watching(self.config)
for renderer in self.renderers:
renderer.stop_serving(self.config)
observer.stop()
observer.join()
def run(self):
"""Starts the kart execution. See --help for more information"""
parser = argparse.ArgumentParser()
parser.add_argument(
"command", help="command to execute", choices={"build", "serve"}
)
parser.add_argument(
"-p", "--port", help="port to bind to", default=9000, type=int
)
parser.add_argument(
"--dev-url",
help="serve your site on a different url",
type=str,
)
args = parser.parse_args()
if args.command == "build":
self.config["serving"] = False
self.build()
if args.command == "serve":
self.config["serving"] = True
if args.dev_url:
self.config["site_url"] = args.dev_url
else:
self.config["site_url"] = f"http://localhost:{args.port}"
self.serve(args.port)
| 34.078652
| 102
| 0.576162
|
4a06206d8c8943ba6035bf4bd97e05a4bf6fe76b
| 2,349
|
py
|
Python
|
nets/autoencoder.py
|
mepittma/bmi203-final
|
ef60d91cafbd3372f13917aa67102ec8f19e7ee8
|
[
"Apache-2.0"
] | null | null | null |
nets/autoencoder.py
|
mepittma/bmi203-final
|
ef60d91cafbd3372f13917aa67102ec8f19e7ee8
|
[
"Apache-2.0"
] | null | null | null |
nets/autoencoder.py
|
mepittma/bmi203-final
|
ef60d91cafbd3372f13917aa67102ec8f19e7ee8
|
[
"Apache-2.0"
] | null | null | null |
# Implement an 8x3x8 autoencoder. This neural network should take a matrix
# input and returns the same matrix as an output.
# First, represent the neural network as a list of layers, where each layer in
# the network is represented as a class with a weight matrix, bias vector,
# activation function, function's derivative.
import numpy as np
np.random.seed(1)
# Sigmoid function (from https://iamtrask.github.io/2015/07/12/basic-python-network/)
def sigmoid(x, deriv = False):
if deriv == True:
return x*(1-x)
return 1/(1+np.exp(-x))
# Train a neural network with three layers, given input and output
def create_nn(X, y, gamma, n_iter=60000):
ncol = len(X[0])
nrow = len(X)
# Initialize weights connecting layer 1 to layer 2, 2 to 3
w1_2 = 2*np.random.random((ncol,nrow)) - 1
w2_3 = 2*np.random.random((nrow,1)) - 1
# Initialize biases
bias_1 = 1.0
bias_2 = 1.0
# Initialize output nodes
l0 = np.array(X)
l1 = sigmoid(np.dot(l0,w1_2))
l2 = sigmoid(np.dot(l1,w2_3))
for j in range(int(n_iter)):
# Forward propogation: equal to the sigmoid of the dot product of previous layer and weights
l1 = sigmoid(np.dot(l0,w1_2)) #+ bias_1
l2 = sigmoid(np.dot(l1,w2_3)) #+ bias_2
# Calculate the error and amount to alter weights
l2_error = y - l2
l2_delta = l2_error*sigmoid(l2,deriv=True)
        # Backpropagate the output-layer delta through the hidden layer
        l1_error = l2_delta.dot(w2_3.T)
        l1_delta = l1_error*sigmoid(l1,deriv=True)
# Update weights and biases
        # The error is defined as (target - output), so gradient descent adds these terms
        w1_2 += gamma * l0.T.dot(l1_delta)
        w2_3 += gamma * l1.T.dot(l2_delta)
#bias_1 -= gamma * l1_delta
#bias_2 -= gamma * l2_delta
# Print error value every 10,000 iterations
if j%10000 == 0:
print( "Error after {} iterations: {}".format(j,l2_error))
# Return the output layer
return(l2)
# Function to test the input/output of a binary test case
def auto_test(X,y,gamma=0.1,n_iter=60000):
print("Input vector: ", X)
    l2 = create_nn(X, y, gamma, n_iter)
# Round each value in the output layer to 0 or 1
output = [[round(number) for number in row] for row in l2]
print("Output vector: ", output)
return(output)
test_vec = [[0],[0],[0],[0],[0],[0],[1],[0]]
output = create_nn(test_vec, test_vec, gamma = .01)
auto_test(test_vec, test_vec)
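The comments at the top of this file describe an 8x3x8 autoencoder, but the weight shapes above are derived from the input dimensions rather than from a fixed 3-unit bottleneck. Below is a minimal sketch of an explicit 8x3x8 layer setup under the same sigmoid/NumPy conventions; the identity matrix used as training data and the learning rate are assumptions for illustration, not taken from the file:

import numpy as np

np.random.seed(1)
X = np.eye(8)                                   # eight one-hot input patterns
w_in_hidden = 2*np.random.random((8, 3)) - 1    # 8 inputs -> 3 hidden units
w_hidden_out = 2*np.random.random((3, 8)) - 1   # 3 hidden units -> 8 outputs

for _ in range(60000):
    hidden = 1/(1 + np.exp(-X.dot(w_in_hidden)))            # 8x3 hidden activations
    output = 1/(1 + np.exp(-hidden.dot(w_hidden_out)))      # 8x8 reconstruction
    out_delta = (X - output) * output * (1 - output)         # output-layer delta
    hid_delta = out_delta.dot(w_hidden_out.T) * hidden * (1 - hidden)
    w_hidden_out += 0.1 * hidden.T.dot(out_delta)
    w_in_hidden += 0.1 * X.T.dot(hid_delta)

# after training, np.round(output) should ideally approximate the identity matrix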
| 30.115385
| 100
| 0.642401
|
4a06207ba60a7a4e5903af22eab19cfa826554ab
| 9,749
|
py
|
Python
|
commands.py
|
ShruthiChari/whyis
|
fcfb6a205c637eaf738babfbfac0bc537c6379bc
|
[
"Apache-2.0"
] | null | null | null |
commands.py
|
ShruthiChari/whyis
|
fcfb6a205c637eaf738babfbfac0bc537c6379bc
|
[
"Apache-2.0"
] | null | null | null |
commands.py
|
ShruthiChari/whyis
|
fcfb6a205c637eaf738babfbfac0bc537c6379bc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
from flask_script import Command, Option, prompt_bool
from flask_security.utils import encrypt_password, verify_password, get_hmac
import flask
from base64 import b64encode
import os
import datetime
import rdflib
from nanopub import Nanopublication
from cookiecutter.main import cookiecutter
import tempfile
np = rdflib.Namespace("http://www.nanopub.org/nschema#")
def rando():
return b64encode(os.urandom(24)).decode('utf-8')
class Configure(Command):
'''Create a Whyis configuration and customization directory.'''
def get_options(self):
return [
]
def run(self, extension_directory=None, extension_name=None):
# Create project from the cookiecutter-pypackage/ template
extra_context = { 'SECRET_KEY':rando(), 'SECURITY_PASSWORD_SALT': rando() }
cookiecutter('config-template/', extra_context=extra_context)
class LoadNanopub(Command):
'''Add a nanopublication to the knowledge graph.'''
def get_options(self):
return [
Option('--input', '-i', dest='input_file',
type=str),
Option('--format', '-f', dest='file_format',
type=str),
Option('--revises', '-r', dest='was_revision_of',
type=str),
]
def run(self, input_file, file_format="trig", was_revision_of=None):
if was_revision_of is not None:
wasRevisionOf = set(flask.current_app.db.objects(predicate=np.hasAssertion,
subject=rdflib.URIRef(was_revision_of)))
if len(wasRevisionOf) == 0:
print "Could not find active nanopublication to revise:", was_revision_of
return
was_revision_of = wasRevisionOf
g = rdflib.ConjunctiveGraph(identifier=rdflib.BNode().skolemize(), store="Sleepycat")
graph_tempdir = tempfile.mkdtemp()
g.store.open(graph_tempdir, True)
#g = rdflib.ConjunctiveGraph(identifier=rdflib.BNode().skolemize())
g1 = g.parse(location=input_file, format=file_format, publicID=flask.current_app.NS.local)
if len(list(g.subjects(rdflib.RDF.type, np.Nanopublication))) == 0:
print "Could not find existing nanopublications.", len(g1), len(g)
new_np = Nanopublication(store=g1.store)
new_np.add((new_np.identifier, rdflib.RDF.type, np.Nanopublication))
new_np.add((new_np.identifier, np.hasAssertion, g1.identifier))
new_np.add((g1.identifier, rdflib.RDF.type, np.Assertion))
nanopub_prepare_graph = rdflib.ConjunctiveGraph(store="Sleepycat")
nanopub_prepare_graph_tempdir = tempfile.mkdtemp()
nanopub_prepare_graph.store.open(nanopub_prepare_graph_tempdir, True)
nanopubs = []
for npub in flask.current_app.nanopub_manager.prepare(g, store=nanopub_prepare_graph.store):
if was_revision_of is not None:
for r in was_revision_of:
print "Marking as revision of", r
npub.pubinfo.add((npub.assertion.identifier, flask.current_app.NS.prov.wasRevisionOf, r))
print 'Prepared', npub.identifier
nanopubs.append(npub)
flask.current_app.nanopub_manager.publish(*nanopubs)
print "Published", npub.identifier
class RetireNanopub(Command):
'''Retire a nanopublication from the knowledge graph.'''
def get_options(self):
return [
Option('--nanopub_uri', '-n', dest='nanopub_uri',
type=str),
]
def run(self, nanopub_uri):
flask.current_app.nanopub_manager.retire(nanopub_uri)
class TestAgent(Command):
    '''Run an agent over the knowledge graph, optionally as a dry run.'''
def get_options(self):
return [
Option('--agent', '-a', dest='agent_path',
type=str),
Option('--dry-run', '-d', action="store_true", dest='dry_run'),
]
def run(self, agent_path, dry_run=False):
app = flask.current_app
from pydoc import locate
agent_class = locate(agent_path)
agent = agent_class()
agent.dry_run = dry_run
if agent.dry_run:
print "Dry run, not storing agent output."
agent.app = app
print agent.get_query()
results = []
if agent.query_predicate == app.NS.whyis.globalChangeQuery:
results.extend(agent.process_graph(app.db))
else:
for resource in agent.getInstances(app.db):
for np_uri, in app.db.query('''select ?np where {
graph ?assertion { ?e ?p ?o.}
?np a np:Nanopublication;
np:hasAssertion ?assertion.
}''', initBindings={'e': resource.identifier}, initNs=app.NS.prefixes):
np = app.nanopub_manager.get(np_uri)
results.extend(agent.process_graph(np))
for np in results:
print np.serialize(format="trig")
class UpdateUser(Command):
"""Update a user in Whyis"""
def get_options(self):
return [
Option('--email', '-e', dest='email',
type=str),
Option('--password', '-p', dest='password',
type=str),
Option('--fn', '-f', dest='fn',
type=str),
Option('--ln', '-l', dest='ln',
type=str),
Option('--username', '-u', dest='identifier', type=str),
Option('--add-roles', dest="add_roles", type=str),
Option('--remove-roles', dest="remove_roles", type=str)
]
def run(self, identifier, email, password, fn, ln, add_roles, remove_roles):
user = flask.current_app.datastore.get_user(identifier)
print "Modifying user", user.resUri
if password is not None:
verified = verify_password(password,encrypt_password(password))
if verified:
user.password = encrypt_password(password)
else:
"User password not verified."
roles = set(user.roles)
if add_roles is not None:
for r in add_roles.split(','):
role = flask.current_app.datastore.find_or_create_role(name=r)
roles.add(role)
if remove_roles is not None:
for r in remove_roles.split(','):
role = flask.current_app.datastore.find_or_create_role(name=r)
roles.remove(role)
user.roles = list(roles)
if email is not None:
user.email = email
if fn is not None:
user.givenName = fn
if ln is not None:
user.familyName = ln
flask.current_app.datastore.commit()
print "Updated user: %s" % (user, )
class CreateUser(Command):
"""Add a user to Whyis"""
def get_options(self):
return [
Option('--email', '-e', dest='email',
type=str),
Option('--password', '-p', dest='password',
type=str),
Option('--fn', '-f', dest='fn',
type=str),
Option('--ln', '-l', dest='ln',
type=str),
Option('--username', '-u', dest='identifier', type=str),
Option('--roles', dest="roles", type=str)
]
def run(self, email, password, fn, ln, identifier, roles=[]):
print 'Password verified:', verify_password(password,encrypt_password(password))
role_objects = []
if roles is not None:
role_objects = [flask.current_app.datastore.find_or_create_role(name=r) for r in roles.split(',')]
user = dict(identifier=identifier, email=email,
password=encrypt_password(password),
givenName=fn, familyName=ln,
confirmed_at = datetime.datetime.utcnow(), roles = role_objects)
user_obj = flask.current_app.datastore.create_user(**user)
print "Created user: %s (%s)" % (user, ', '.join([r.resUri for r in role_objects]))
class Test(Command):
"""
Run tests
"""
verbosity = 2
failfast = False
def get_options(self):
return [
Option('--verbosity', '-v', dest='verbose',
type=int, default=self.verbosity),
Option('--failfast', dest='failfast',
default=self.failfast, action='store_false')
]
def run(self, verbosity, failfast):
import sys
import glob
import unittest
exists = os.path.exists
isdir = os.path.isdir
join = os.path.join
project_path = os.path.abspath(os.path.dirname('.'))
sys.path.insert(0, project_path)
# our special folder for blueprints
if exists('apps'):
sys.path.insert(0, join('apps'))
loader = unittest.TestLoader()
all_tests = []
if exists('apps'):
for path in glob.glob('apps/*'):
if isdir(path):
tests_dir = join(path, 'tests')
if exists(join(path, 'tests.py')):
all_tests.append(loader.discover(path, 'tests.py'))
elif exists(tests_dir):
all_tests.append(loader.discover(tests_dir, pattern='test*.py'))
if exists('tests') and isdir('tests'):
all_tests.append(loader.discover('tests', pattern='test*.py'))
elif exists('tests.py'):
all_tests.append(loader.discover('.', pattern='tests.py'))
test_suite = unittest.TestSuite(all_tests)
unittest.TextTestRunner(
verbosity=verbosity, failfast=failfast).run(test_suite)
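These Command subclasses target flask_script, so a hedged sketch of how they might be wired into a manager script follows; the app construction and the command names are assumptions for illustration, not taken from this file:

from flask import Flask
from flask_script import Manager

app = Flask(__name__)                 # stand-in; the real Whyis app is configured elsewhere
manager = Manager(app)
manager.add_command('configure', Configure())
manager.add_command('load', LoadNanopub())
manager.add_command('createuser', CreateUser())
manager.add_command('updateuser', UpdateUser())
manager.add_command('test', Test())

if __name__ == '__main__':
    manager.run()                     # dispatches to the Command classes defined above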
| 38.231373
| 110
| 0.582008
|
4a0620a471a216f68190b6fab3267d48495239ae
| 5,219
|
py
|
Python
|
spark_auto_mapper_fhir/backbone_elements/implementation_guide_resource.py
|
imranq2/SparkAutoMapper.FHIR
|
dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2
|
[
"Apache-2.0"
] | 1
|
2020-10-31T23:25:07.000Z
|
2020-10-31T23:25:07.000Z
|
spark_auto_mapper_fhir/backbone_elements/implementation_guide_resource.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
spark_auto_mapper_fhir/backbone_elements/implementation_guide_resource.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from spark_auto_mapper_fhir.fhir_types.boolean import FhirBoolean
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.string import FhirString
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.fhir_types.id import FhirId
from spark_auto_mapper_fhir.resources.resource import Resource
from spark_auto_mapper_fhir.base_types.fhir_backbone_element_base import (
FhirBackboneElementBase,
)
if TYPE_CHECKING:
pass
# id_ (string)
# extension (Extension)
# modifierExtension (Extension)
# reference (Reference)
from spark_auto_mapper_fhir.complex_types.reference import Reference
# Imports for References for reference
# fhirVersion (FHIRVersion)
from spark_auto_mapper_fhir.value_sets.fhir_version import FHIRVersionCode
# name (string)
# description (string)
# exampleBoolean (boolean)
# exampleCanonical (canonical)
from spark_auto_mapper_fhir.fhir_types.canonical import FhirCanonical
# groupingId (id)
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ImplementationGuideResource(FhirBackboneElementBase):
"""
ImplementationGuide.Resource
A set of rules of how a particular interoperability or standards problem is solved - typically through the use of FHIR resources. This resource is used to gather all the parts of an implementation guide into a logical whole and to publish a computable definition of all the parts.
"""
# noinspection PyPep8Naming
def __init__(
self,
*,
id_: Optional[FhirString] = None,
extension: Optional[FhirList[ExtensionBase]] = None,
modifierExtension: Optional[FhirList[ExtensionBase]] = None,
reference: Reference[Resource],
fhirVersion: Optional[FhirList[FHIRVersionCode]] = None,
name: Optional[FhirString] = None,
description: Optional[FhirString] = None,
exampleBoolean: Optional[FhirBoolean] = None,
exampleCanonical: Optional[FhirCanonical] = None,
groupingId: Optional[FhirId] = None,
) -> None:
"""
A set of rules of how a particular interoperability or standards problem is
solved - typically through the use of FHIR resources. This resource is used to
gather all the parts of an implementation guide into a logical whole and to
publish a computable definition of all the parts.
:param id_: None
:param extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
:param modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
:param reference: Where this resource is found.
:param fhirVersion: Indicates the FHIR Version(s) this artifact is intended to apply to. If no
versions are specified, the resource is assumed to apply to all the versions
stated in ImplementationGuide.fhirVersion.
:param name: A human assigned name for the resource. All resources SHOULD have a name, but
the name may be extracted from the resource (e.g. ValueSet.name).
:param description: A description of the reason that a resource has been included in the
implementation guide.
:param exampleBoolean: None
:param exampleCanonical: None
:param groupingId: Reference to the id of the grouping this resource appears in.
"""
super().__init__(
id_=id_,
extension=extension,
modifierExtension=modifierExtension,
reference=reference,
fhirVersion=fhirVersion,
name=name,
description=description,
exampleBoolean=exampleBoolean,
exampleCanonical=exampleCanonical,
groupingId=groupingId,
)
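A minimal usage sketch for the generated class above; the placeholder values stand in for objects built with the wider spark_auto_mapper_fhir API, whose constructors are not shown in this file:

# example_reference, example_name and example_flag are hypothetical placeholders for a
# Reference[Resource], FhirString and FhirBoolean built elsewhere with the library's helpers
resource = ImplementationGuideResource(
    reference=example_reference,
    name=example_name,
    exampleBoolean=example_flag,
)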
| 48.324074
| 288
| 0.720253
|
4a0620b81f4861f68faf121bb247ba73e4fcf275
| 74,104
|
py
|
Python
|
venv/lib/python3.8/site-packages/vsts/gallery/v4_0/gallery_client.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/gallery/v4_0/gallery_client.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/gallery/v4_0/gallery_client.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class GalleryClient(VssClient):
"""Gallery
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(GalleryClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '69d21c00-f135-441b-b5ce-3626378e0819'
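    # Illustrative usage sketch (not part of the generated client): callers typically build
    # credentials with msrest and then invoke the methods below, e.g. (URL and token assumed):
    #
    #   from msrest.authentication import BasicAuthentication
    #   creds = BasicAuthentication('', personal_access_token)
    #   gallery = GalleryClient(base_url='https://marketplace.visualstudio.com', creds=creds)
    #   extension = gallery.get_extension('ms-python', 'python')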
def share_extension_by_id(self, extension_id, account_name):
"""ShareExtensionById.
[Preview API]
:param str extension_id:
:param str account_name:
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
if account_name is not None:
route_values['accountName'] = self._serialize.url('account_name', account_name, 'str')
self._send(http_method='POST',
location_id='1f19631b-a0b4-4a03-89c2-d79785d24360',
version='4.0-preview.1',
route_values=route_values)
def unshare_extension_by_id(self, extension_id, account_name):
"""UnshareExtensionById.
[Preview API]
:param str extension_id:
:param str account_name:
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
if account_name is not None:
route_values['accountName'] = self._serialize.url('account_name', account_name, 'str')
self._send(http_method='DELETE',
location_id='1f19631b-a0b4-4a03-89c2-d79785d24360',
version='4.0-preview.1',
route_values=route_values)
def share_extension(self, publisher_name, extension_name, account_name):
"""ShareExtension.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str account_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if account_name is not None:
route_values['accountName'] = self._serialize.url('account_name', account_name, 'str')
self._send(http_method='POST',
location_id='a1e66d8f-f5de-4d16-8309-91a4e015ee46',
version='4.0-preview.1',
route_values=route_values)
def unshare_extension(self, publisher_name, extension_name, account_name):
"""UnshareExtension.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str account_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if account_name is not None:
route_values['accountName'] = self._serialize.url('account_name', account_name, 'str')
self._send(http_method='DELETE',
location_id='a1e66d8f-f5de-4d16-8309-91a4e015ee46',
version='4.0-preview.1',
route_values=route_values)
def get_acquisition_options(self, item_id, installation_target, test_commerce=None, is_free_or_trial_install=None):
"""GetAcquisitionOptions.
[Preview API]
:param str item_id:
:param str installation_target:
:param bool test_commerce:
:param bool is_free_or_trial_install:
:rtype: :class:`<AcquisitionOptions> <gallery.v4_0.models.AcquisitionOptions>`
"""
route_values = {}
if item_id is not None:
route_values['itemId'] = self._serialize.url('item_id', item_id, 'str')
query_parameters = {}
if installation_target is not None:
query_parameters['installationTarget'] = self._serialize.query('installation_target', installation_target, 'str')
if test_commerce is not None:
query_parameters['testCommerce'] = self._serialize.query('test_commerce', test_commerce, 'bool')
if is_free_or_trial_install is not None:
query_parameters['isFreeOrTrialInstall'] = self._serialize.query('is_free_or_trial_install', is_free_or_trial_install, 'bool')
response = self._send(http_method='GET',
location_id='9d0a0105-075e-4760-aa15-8bcf54d1bd7d',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('AcquisitionOptions', response)
def request_acquisition(self, acquisition_request):
"""RequestAcquisition.
[Preview API]
:param :class:`<ExtensionAcquisitionRequest> <gallery.v4_0.models.ExtensionAcquisitionRequest>` acquisition_request:
:rtype: :class:`<ExtensionAcquisitionRequest> <gallery.v4_0.models.ExtensionAcquisitionRequest>`
"""
content = self._serialize.body(acquisition_request, 'ExtensionAcquisitionRequest')
response = self._send(http_method='POST',
location_id='3adb1f2d-e328-446e-be73-9f6d98071c45',
version='4.0-preview.1',
content=content)
return self._deserialize('ExtensionAcquisitionRequest', response)
def get_asset_by_name(self, publisher_name, extension_name, version, asset_type, account_token=None, accept_default=None, **kwargs):
"""GetAssetByName.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:param str asset_type:
:param str account_token:
:param bool accept_default:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
if accept_default is not None:
query_parameters['acceptDefault'] = self._serialize.query('accept_default', accept_default, 'bool')
response = self._send(http_method='GET',
location_id='7529171f-a002-4180-93ba-685f358a0482',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_asset(self, extension_id, version, asset_type, account_token=None, accept_default=None, **kwargs):
"""GetAsset.
[Preview API]
:param str extension_id:
:param str version:
:param str asset_type:
:param str account_token:
:param bool accept_default:
:rtype: object
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
if accept_default is not None:
query_parameters['acceptDefault'] = self._serialize.query('accept_default', accept_default, 'bool')
response = self._send(http_method='GET',
location_id='5d545f3d-ef47-488b-8be3-f5ee1517856c',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_asset_authenticated(self, publisher_name, extension_name, version, asset_type, account_token=None, **kwargs):
"""GetAssetAuthenticated.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:param str asset_type:
:param str account_token:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
response = self._send(http_method='GET',
location_id='506aff36-2622-4f70-8063-77cce6366d20',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def associate_azure_publisher(self, publisher_name, azure_publisher_id):
"""AssociateAzurePublisher.
[Preview API]
:param str publisher_name:
:param str azure_publisher_id:
:rtype: :class:`<AzurePublisher> <gallery.v4_0.models.AzurePublisher>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if azure_publisher_id is not None:
query_parameters['azurePublisherId'] = self._serialize.query('azure_publisher_id', azure_publisher_id, 'str')
response = self._send(http_method='PUT',
location_id='efd202a6-9d87-4ebc-9229-d2b8ae2fdb6d',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('AzurePublisher', response)
def query_associated_azure_publisher(self, publisher_name):
"""QueryAssociatedAzurePublisher.
[Preview API]
:param str publisher_name:
:rtype: :class:`<AzurePublisher> <gallery.v4_0.models.AzurePublisher>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
response = self._send(http_method='GET',
location_id='efd202a6-9d87-4ebc-9229-d2b8ae2fdb6d',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('AzurePublisher', response)
def get_categories(self, languages=None):
"""GetCategories.
[Preview API]
:param str languages:
:rtype: [str]
"""
query_parameters = {}
if languages is not None:
query_parameters['languages'] = self._serialize.query('languages', languages, 'str')
response = self._send(http_method='GET',
location_id='e0a5a71e-3ac3-43a0-ae7d-0bb5c3046a2a',
version='4.0-preview.1',
query_parameters=query_parameters)
return self._deserialize('[str]', self._unwrap_collection(response))
def get_category_details(self, category_name, languages=None, product=None):
"""GetCategoryDetails.
[Preview API]
:param str category_name:
:param str languages:
:param str product:
:rtype: :class:`<CategoriesResult> <gallery.v4_0.models.CategoriesResult>`
"""
route_values = {}
if category_name is not None:
route_values['categoryName'] = self._serialize.url('category_name', category_name, 'str')
query_parameters = {}
if languages is not None:
query_parameters['languages'] = self._serialize.query('languages', languages, 'str')
if product is not None:
query_parameters['product'] = self._serialize.query('product', product, 'str')
response = self._send(http_method='GET',
location_id='75d3c04d-84d2-4973-acd2-22627587dabc',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('CategoriesResult', response)
def get_category_tree(self, product, category_id, lcid=None, source=None, product_version=None, skus=None, sub_skus=None):
"""GetCategoryTree.
[Preview API]
:param str product:
:param str category_id:
:param int lcid:
:param str source:
:param str product_version:
:param str skus:
:param str sub_skus:
:rtype: :class:`<ProductCategory> <gallery.v4_0.models.ProductCategory>`
"""
route_values = {}
if product is not None:
route_values['product'] = self._serialize.url('product', product, 'str')
if category_id is not None:
route_values['categoryId'] = self._serialize.url('category_id', category_id, 'str')
query_parameters = {}
if lcid is not None:
query_parameters['lcid'] = self._serialize.query('lcid', lcid, 'int')
if source is not None:
query_parameters['source'] = self._serialize.query('source', source, 'str')
if product_version is not None:
query_parameters['productVersion'] = self._serialize.query('product_version', product_version, 'str')
if skus is not None:
query_parameters['skus'] = self._serialize.query('skus', skus, 'str')
if sub_skus is not None:
query_parameters['subSkus'] = self._serialize.query('sub_skus', sub_skus, 'str')
response = self._send(http_method='GET',
location_id='1102bb42-82b0-4955-8d8a-435d6b4cedd3',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ProductCategory', response)
def get_root_categories(self, product, lcid=None, source=None, product_version=None, skus=None, sub_skus=None):
"""GetRootCategories.
[Preview API]
:param str product:
:param int lcid:
:param str source:
:param str product_version:
:param str skus:
:param str sub_skus:
:rtype: :class:`<ProductCategoriesResult> <gallery.v4_0.models.ProductCategoriesResult>`
"""
route_values = {}
if product is not None:
route_values['product'] = self._serialize.url('product', product, 'str')
query_parameters = {}
if lcid is not None:
query_parameters['lcid'] = self._serialize.query('lcid', lcid, 'int')
if source is not None:
query_parameters['source'] = self._serialize.query('source', source, 'str')
if product_version is not None:
query_parameters['productVersion'] = self._serialize.query('product_version', product_version, 'str')
if skus is not None:
query_parameters['skus'] = self._serialize.query('skus', skus, 'str')
if sub_skus is not None:
query_parameters['subSkus'] = self._serialize.query('sub_skus', sub_skus, 'str')
response = self._send(http_method='GET',
location_id='31fba831-35b2-46f6-a641-d05de5a877d8',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ProductCategoriesResult', response)
def get_certificate(self, publisher_name, extension_name, version=None, **kwargs):
"""GetCertificate.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
response = self._send(http_method='GET',
location_id='e905ad6a-3f1f-4d08-9f6d-7d357ff8b7d0',
version='4.0-preview.1',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_extension_events(self, publisher_name, extension_name, count=None, after_date=None, include=None, include_property=None):
"""GetExtensionEvents.
[Preview API] Get install/uninstall events of an extension. If both count and afterDate parameters are specified, count takes precedence.
:param str publisher_name: Name of the publisher
:param str extension_name: Name of the extension
:param int count: Count of events to fetch, applies to each event type.
:param datetime after_date: Fetch events that occurred on or after this date
:param str include: Filter options. Supported values: install, uninstall, review, acquisition, sales. Default is to fetch all types of events
:param str include_property: Event properties to include. Currently only 'lastContactDetails' is supported for uninstall events
:rtype: :class:`<ExtensionEvents> <gallery.v4_0.models.ExtensionEvents>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if count is not None:
query_parameters['count'] = self._serialize.query('count', count, 'int')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
if include is not None:
query_parameters['include'] = self._serialize.query('include', include, 'str')
if include_property is not None:
query_parameters['includeProperty'] = self._serialize.query('include_property', include_property, 'str')
response = self._send(http_method='GET',
location_id='3d13c499-2168-4d06-bef4-14aba185dcd5',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ExtensionEvents', response)
def publish_extension_events(self, extension_events):
"""PublishExtensionEvents.
[Preview API] API endpoint to publish extension install/uninstall events. This is meant to be invoked by EMS only for sending us data related to install/uninstall of an extension.
:param [ExtensionEvents] extension_events:
"""
content = self._serialize.body(extension_events, '[ExtensionEvents]')
self._send(http_method='POST',
location_id='0bf2bd3a-70e0-4d5d-8bf7-bd4a9c2ab6e7',
version='4.0-preview.1',
content=content)
def query_extensions(self, extension_query, account_token=None):
"""QueryExtensions.
[Preview API]
:param :class:`<ExtensionQuery> <gallery.v4_0.models.ExtensionQuery>` extension_query:
:param str account_token:
:rtype: :class:`<ExtensionQueryResult> <gallery.v4_0.models.ExtensionQueryResult>`
"""
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
content = self._serialize.body(extension_query, 'ExtensionQuery')
response = self._send(http_method='POST',
location_id='eb9d5ee1-6d43-456b-b80e-8a96fbc014b6',
version='4.0-preview.1',
query_parameters=query_parameters,
content=content)
return self._deserialize('ExtensionQueryResult', response)
def create_extension(self, upload_stream, **kwargs):
"""CreateExtension.
[Preview API]
:param object upload_stream: Stream to upload
:rtype: :class:`<PublishedExtension> <gallery.v4_0.models.PublishedExtension>`
"""
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='POST',
location_id='a41192c8-9525-4b58-bc86-179fa549d80d',
version='4.0-preview.2',
content=content,
media_type='application/octet-stream')
return self._deserialize('PublishedExtension', response)
def delete_extension_by_id(self, extension_id, version=None):
"""DeleteExtensionById.
[Preview API]
:param str extension_id:
:param str version:
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
query_parameters = {}
if version is not None:
query_parameters['version'] = self._serialize.query('version', version, 'str')
self._send(http_method='DELETE',
location_id='a41192c8-9525-4b58-bc86-179fa549d80d',
version='4.0-preview.2',
route_values=route_values,
query_parameters=query_parameters)
def get_extension_by_id(self, extension_id, version=None, flags=None):
"""GetExtensionById.
[Preview API]
:param str extension_id:
:param str version:
:param str flags:
:rtype: :class:`<PublishedExtension> <gallery.v4_0.models.PublishedExtension>`
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
query_parameters = {}
if version is not None:
query_parameters['version'] = self._serialize.query('version', version, 'str')
if flags is not None:
query_parameters['flags'] = self._serialize.query('flags', flags, 'str')
response = self._send(http_method='GET',
location_id='a41192c8-9525-4b58-bc86-179fa549d80d',
version='4.0-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('PublishedExtension', response)
def update_extension_by_id(self, extension_id):
"""UpdateExtensionById.
[Preview API]
:param str extension_id:
:rtype: :class:`<PublishedExtension> <gallery.v4_0.models.PublishedExtension>`
"""
route_values = {}
if extension_id is not None:
route_values['extensionId'] = self._serialize.url('extension_id', extension_id, 'str')
response = self._send(http_method='PUT',
location_id='a41192c8-9525-4b58-bc86-179fa549d80d',
version='4.0-preview.2',
route_values=route_values)
return self._deserialize('PublishedExtension', response)
def create_extension_with_publisher(self, upload_stream, publisher_name, **kwargs):
"""CreateExtensionWithPublisher.
[Preview API]
:param object upload_stream: Stream to upload
:param str publisher_name:
:rtype: :class:`<PublishedExtension> <gallery.v4_0.models.PublishedExtension>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='POST',
location_id='e11ea35a-16fe-4b80-ab11-c4cab88a0966',
version='4.0-preview.2',
route_values=route_values,
content=content,
media_type='application/octet-stream')
return self._deserialize('PublishedExtension', response)
def delete_extension(self, publisher_name, extension_name, version=None):
"""DeleteExtension.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if version is not None:
query_parameters['version'] = self._serialize.query('version', version, 'str')
self._send(http_method='DELETE',
location_id='e11ea35a-16fe-4b80-ab11-c4cab88a0966',
version='4.0-preview.2',
route_values=route_values,
query_parameters=query_parameters)
def get_extension(self, publisher_name, extension_name, version=None, flags=None, account_token=None):
"""GetExtension.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:param str flags:
:param str account_token:
:rtype: :class:`<PublishedExtension> <gallery.v4_0.models.PublishedExtension>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if version is not None:
query_parameters['version'] = self._serialize.query('version', version, 'str')
if flags is not None:
query_parameters['flags'] = self._serialize.query('flags', flags, 'str')
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
response = self._send(http_method='GET',
location_id='e11ea35a-16fe-4b80-ab11-c4cab88a0966',
version='4.0-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('PublishedExtension', response)
def update_extension(self, upload_stream, publisher_name, extension_name, **kwargs):
"""UpdateExtension.
[Preview API]
:param object upload_stream: Stream to upload
:param str publisher_name:
:param str extension_name:
:rtype: :class:`<PublishedExtension> <gallery.v4_0.models.PublishedExtension>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='e11ea35a-16fe-4b80-ab11-c4cab88a0966',
version='4.0-preview.2',
route_values=route_values,
content=content,
media_type='application/octet-stream')
return self._deserialize('PublishedExtension', response)
def update_extension_properties(self, publisher_name, extension_name, flags):
"""UpdateExtensionProperties.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str flags:
:rtype: :class:`<PublishedExtension> <gallery.v4_0.models.PublishedExtension>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if flags is not None:
query_parameters['flags'] = self._serialize.query('flags', flags, 'str')
response = self._send(http_method='PATCH',
location_id='e11ea35a-16fe-4b80-ab11-c4cab88a0966',
version='4.0-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('PublishedExtension', response)
def extension_validator(self, azure_rest_api_request_model):
"""ExtensionValidator.
[Preview API]
:param :class:`<AzureRestApiRequestModel> <gallery.v4_0.models.AzureRestApiRequestModel>` azure_rest_api_request_model:
"""
content = self._serialize.body(azure_rest_api_request_model, 'AzureRestApiRequestModel')
self._send(http_method='POST',
location_id='05e8a5e1-8c59-4c2c-8856-0ff087d1a844',
version='4.0-preview.1',
content=content)
def send_notifications(self, notification_data):
"""SendNotifications.
[Preview API] Send Notification
:param :class:`<NotificationsData> <gallery.v4_0.models.NotificationsData>` notification_data: Denoting the data needed to send notification
"""
content = self._serialize.body(notification_data, 'NotificationsData')
self._send(http_method='POST',
location_id='eab39817-413c-4602-a49f-07ad00844980',
version='4.0-preview.1',
content=content)
def get_package(self, publisher_name, extension_name, version, account_token=None, accept_default=None, **kwargs):
"""GetPackage.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:param str account_token:
:param bool accept_default:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
if accept_default is not None:
query_parameters['acceptDefault'] = self._serialize.query('accept_default', accept_default, 'bool')
response = self._send(http_method='GET',
location_id='7cb576f8-1cae-4c4b-b7b1-e4af5759e965',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_asset_with_token(self, publisher_name, extension_name, version, asset_type, asset_token=None, account_token=None, accept_default=None, **kwargs):
"""GetAssetWithToken.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:param str asset_type:
:param str asset_token:
:param str account_token:
:param bool accept_default:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
if asset_token is not None:
route_values['assetToken'] = self._serialize.url('asset_token', asset_token, 'str')
query_parameters = {}
if account_token is not None:
query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
if accept_default is not None:
query_parameters['acceptDefault'] = self._serialize.query('accept_default', accept_default, 'bool')
response = self._send(http_method='GET',
location_id='364415a1-0077-4a41-a7a0-06edd4497492',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def query_publishers(self, publisher_query):
"""QueryPublishers.
[Preview API]
:param :class:`<PublisherQuery> <gallery.v4_0.models.PublisherQuery>` publisher_query:
:rtype: :class:`<PublisherQueryResult> <gallery.v4_0.models.PublisherQueryResult>`
"""
content = self._serialize.body(publisher_query, 'PublisherQuery')
response = self._send(http_method='POST',
location_id='2ad6ee0a-b53f-4034-9d1d-d009fda1212e',
version='4.0-preview.1',
content=content)
return self._deserialize('PublisherQueryResult', response)
def create_publisher(self, publisher):
"""CreatePublisher.
[Preview API]
:param :class:`<Publisher> <gallery.v4_0.models.Publisher>` publisher:
:rtype: :class:`<Publisher> <gallery.v4_0.models.Publisher>`
"""
content = self._serialize.body(publisher, 'Publisher')
response = self._send(http_method='POST',
location_id='4ddec66a-e4f6-4f5d-999e-9e77710d7ff4',
version='4.0-preview.1',
content=content)
return self._deserialize('Publisher', response)
def delete_publisher(self, publisher_name):
"""DeletePublisher.
[Preview API]
:param str publisher_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
self._send(http_method='DELETE',
location_id='4ddec66a-e4f6-4f5d-999e-9e77710d7ff4',
version='4.0-preview.1',
route_values=route_values)
def get_publisher(self, publisher_name, flags=None):
"""GetPublisher.
[Preview API]
:param str publisher_name:
:param int flags:
:rtype: :class:`<Publisher> <gallery.v4_0.models.Publisher>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if flags is not None:
query_parameters['flags'] = self._serialize.query('flags', flags, 'int')
response = self._send(http_method='GET',
location_id='4ddec66a-e4f6-4f5d-999e-9e77710d7ff4',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Publisher', response)
def update_publisher(self, publisher, publisher_name):
"""UpdatePublisher.
[Preview API]
:param :class:`<Publisher> <gallery.v4_0.models.Publisher>` publisher:
:param str publisher_name:
:rtype: :class:`<Publisher> <gallery.v4_0.models.Publisher>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
content = self._serialize.body(publisher, 'Publisher')
response = self._send(http_method='PUT',
location_id='4ddec66a-e4f6-4f5d-999e-9e77710d7ff4',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Publisher', response)
def get_questions(self, publisher_name, extension_name, count=None, page=None, after_date=None):
"""GetQuestions.
[Preview API] Returns a list of questions with their responses associated with an extension.
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param int count: Number of questions to retrieve (defaults to 10).
:param int page: Page number from which set of questions are to be retrieved.
        :param datetime after_date: If provided, only questions posted after this date are returned
:rtype: :class:`<QuestionsResult> <gallery.v4_0.models.QuestionsResult>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if count is not None:
query_parameters['count'] = self._serialize.query('count', count, 'int')
if page is not None:
query_parameters['page'] = self._serialize.query('page', page, 'int')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
response = self._send(http_method='GET',
location_id='c010d03d-812c-4ade-ae07-c1862475eda5',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('QuestionsResult', response)
def report_question(self, concern, pub_name, ext_name, question_id):
"""ReportQuestion.
[Preview API] Flags a concern with an existing question for an extension.
:param :class:`<Concern> <gallery.v4_0.models.Concern>` concern: User reported concern with a question for the extension.
:param str pub_name: Name of the publisher who published the extension.
:param str ext_name: Name of the extension.
:param long question_id: Identifier of the question to be updated for the extension.
:rtype: :class:`<Concern> <gallery.v4_0.models.Concern>`
"""
route_values = {}
if pub_name is not None:
route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
if ext_name is not None:
route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
content = self._serialize.body(concern, 'Concern')
response = self._send(http_method='POST',
location_id='784910cd-254a-494d-898b-0728549b2f10',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Concern', response)
def create_question(self, question, publisher_name, extension_name):
"""CreateQuestion.
[Preview API] Creates a new question for an extension.
:param :class:`<Question> <gallery.v4_0.models.Question>` question: Question to be created for the extension.
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:rtype: :class:`<Question> <gallery.v4_0.models.Question>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
content = self._serialize.body(question, 'Question')
response = self._send(http_method='POST',
location_id='6d1d9741-eca8-4701-a3a5-235afc82dfa4',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Question', response)
def delete_question(self, publisher_name, extension_name, question_id):
"""DeleteQuestion.
[Preview API] Deletes an existing question and all its associated responses for an extension. (soft delete)
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param long question_id: Identifier of the question to be deleted for the extension.
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
self._send(http_method='DELETE',
location_id='6d1d9741-eca8-4701-a3a5-235afc82dfa4',
version='4.0-preview.1',
route_values=route_values)
def update_question(self, question, publisher_name, extension_name, question_id):
"""UpdateQuestion.
[Preview API] Updates an existing question for an extension.
:param :class:`<Question> <gallery.v4_0.models.Question>` question: Updated question to be set for the extension.
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param long question_id: Identifier of the question to be updated for the extension.
:rtype: :class:`<Question> <gallery.v4_0.models.Question>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
content = self._serialize.body(question, 'Question')
response = self._send(http_method='PATCH',
location_id='6d1d9741-eca8-4701-a3a5-235afc82dfa4',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Question', response)
def create_response(self, response, publisher_name, extension_name, question_id):
"""CreateResponse.
[Preview API] Creates a new response for a given question for an extension.
:param :class:`<Response> <gallery.v4_0.models.Response>` response: Response to be created for the extension.
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param long question_id: Identifier of the question for which response is to be created for the extension.
:rtype: :class:`<Response> <gallery.v4_0.models.Response>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
content = self._serialize.body(response, 'Response')
response = self._send(http_method='POST',
location_id='7f8ae5e0-46b0-438f-b2e8-13e8513517bd',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Response', response)
def delete_response(self, publisher_name, extension_name, question_id, response_id):
"""DeleteResponse.
[Preview API] Deletes a response for an extension. (soft delete)
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param long question_id: Identifies the question whose response is to be deleted.
:param long response_id: Identifies the response to be deleted.
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
if response_id is not None:
route_values['responseId'] = self._serialize.url('response_id', response_id, 'long')
self._send(http_method='DELETE',
location_id='7f8ae5e0-46b0-438f-b2e8-13e8513517bd',
version='4.0-preview.1',
route_values=route_values)
def update_response(self, response, publisher_name, extension_name, question_id, response_id):
"""UpdateResponse.
[Preview API] Updates an existing response for a given question for an extension.
:param :class:`<Response> <gallery.v4_0.models.Response>` response: Updated response to be set for the extension.
:param str publisher_name: Name of the publisher who published the extension.
:param str extension_name: Name of the extension.
:param long question_id: Identifier of the question for which response is to be updated for the extension.
:param long response_id: Identifier of the response which has to be updated.
:rtype: :class:`<Response> <gallery.v4_0.models.Response>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if question_id is not None:
route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
if response_id is not None:
route_values['responseId'] = self._serialize.url('response_id', response_id, 'long')
content = self._serialize.body(response, 'Response')
response = self._send(http_method='PATCH',
location_id='7f8ae5e0-46b0-438f-b2e8-13e8513517bd',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Response', response)
def get_extension_reports(self, publisher_name, extension_name, days=None, count=None, after_date=None):
"""GetExtensionReports.
[Preview API] Returns extension reports
:param str publisher_name: Name of the publisher who published the extension
:param str extension_name: Name of the extension
:param int days: Last n days report. If afterDate and days are specified, days will take priority
:param int count: Number of events to be returned
:param datetime after_date: Use if you want to fetch events newer than the specified date
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if days is not None:
query_parameters['days'] = self._serialize.query('days', days, 'int')
if count is not None:
query_parameters['count'] = self._serialize.query('count', count, 'int')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
response = self._send(http_method='GET',
location_id='79e0c74f-157f-437e-845f-74fbb4121d4c',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('object', response)
def get_reviews(self, publisher_name, extension_name, count=None, filter_options=None, before_date=None, after_date=None):
"""GetReviews.
[Preview API] Returns a list of reviews associated with an extension
:param str publisher_name: Name of the publisher who published the extension
:param str extension_name: Name of the extension
:param int count: Number of reviews to retrieve (defaults to 5)
:param str filter_options: FilterOptions to filter out empty reviews etcetera, defaults to none
:param datetime before_date: Use if you want to fetch reviews older than the specified date, defaults to null
:param datetime after_date: Use if you want to fetch reviews newer than the specified date, defaults to null
:rtype: :class:`<ReviewsResult> <gallery.v4_0.models.ReviewsResult>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if count is not None:
query_parameters['count'] = self._serialize.query('count', count, 'int')
if filter_options is not None:
query_parameters['filterOptions'] = self._serialize.query('filter_options', filter_options, 'str')
if before_date is not None:
query_parameters['beforeDate'] = self._serialize.query('before_date', before_date, 'iso-8601')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
response = self._send(http_method='GET',
location_id='5b3f819f-f247-42ad-8c00-dd9ab9ab246d',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReviewsResult', response)
def get_reviews_summary(self, pub_name, ext_name, before_date=None, after_date=None):
"""GetReviewsSummary.
[Preview API] Returns a summary of the reviews
:param str pub_name: Name of the publisher who published the extension
:param str ext_name: Name of the extension
:param datetime before_date: Use if you want to fetch summary of reviews older than the specified date, defaults to null
:param datetime after_date: Use if you want to fetch summary of reviews newer than the specified date, defaults to null
:rtype: :class:`<ReviewSummary> <gallery.v4_0.models.ReviewSummary>`
"""
route_values = {}
if pub_name is not None:
route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
if ext_name is not None:
route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
query_parameters = {}
if before_date is not None:
query_parameters['beforeDate'] = self._serialize.query('before_date', before_date, 'iso-8601')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
response = self._send(http_method='GET',
location_id='b7b44e21-209e-48f0-ae78-04727fc37d77',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReviewSummary', response)
def create_review(self, review, pub_name, ext_name):
"""CreateReview.
[Preview API] Creates a new review for an extension
:param :class:`<Review> <gallery.v4_0.models.Review>` review: Review to be created for the extension
:param str pub_name: Name of the publisher who published the extension
:param str ext_name: Name of the extension
:rtype: :class:`<Review> <gallery.v4_0.models.Review>`
"""
route_values = {}
if pub_name is not None:
route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
if ext_name is not None:
route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
content = self._serialize.body(review, 'Review')
response = self._send(http_method='POST',
location_id='e6e85b9d-aa70-40e6-aa28-d0fbf40b91a3',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Review', response)
def delete_review(self, pub_name, ext_name, review_id):
"""DeleteReview.
[Preview API] Deletes a review
:param str pub_name: Name of the publisher who published the extension
:param str ext_name: Name of the extension
:param long review_id: Id of the review which needs to be deleted
"""
route_values = {}
if pub_name is not None:
route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
if ext_name is not None:
route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
if review_id is not None:
route_values['reviewId'] = self._serialize.url('review_id', review_id, 'long')
self._send(http_method='DELETE',
location_id='e6e85b9d-aa70-40e6-aa28-d0fbf40b91a3',
version='4.0-preview.1',
route_values=route_values)
def update_review(self, review_patch, pub_name, ext_name, review_id):
"""UpdateReview.
[Preview API] Updates or Flags a review
:param :class:`<ReviewPatch> <gallery.v4_0.models.ReviewPatch>` review_patch: ReviewPatch object which contains the changes to be applied to the review
:param str pub_name: Name of the publisher who published the extension
:param str ext_name: Name of the extension
:param long review_id: Id of the review which needs to be updated
:rtype: :class:`<ReviewPatch> <gallery.v4_0.models.ReviewPatch>`
"""
route_values = {}
if pub_name is not None:
route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
if ext_name is not None:
route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
if review_id is not None:
route_values['reviewId'] = self._serialize.url('review_id', review_id, 'long')
content = self._serialize.body(review_patch, 'ReviewPatch')
response = self._send(http_method='PATCH',
location_id='e6e85b9d-aa70-40e6-aa28-d0fbf40b91a3',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('ReviewPatch', response)
def create_category(self, category):
"""CreateCategory.
[Preview API]
:param :class:`<ExtensionCategory> <gallery.v4_0.models.ExtensionCategory>` category:
:rtype: :class:`<ExtensionCategory> <gallery.v4_0.models.ExtensionCategory>`
"""
content = self._serialize.body(category, 'ExtensionCategory')
response = self._send(http_method='POST',
location_id='476531a3-7024-4516-a76a-ed64d3008ad6',
version='4.0-preview.1',
content=content)
return self._deserialize('ExtensionCategory', response)
def get_gallery_user_settings(self, user_scope, key=None):
"""GetGalleryUserSettings.
[Preview API] Get all setting entries for the given user/all-users scope
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:param str key: Optional key under which to filter all the entries
:rtype: {object}
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
if key is not None:
route_values['key'] = self._serialize.url('key', key, 'str')
response = self._send(http_method='GET',
location_id='9b75ece3-7960-401c-848b-148ac01ca350',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('{object}', self._unwrap_collection(response))
def set_gallery_user_settings(self, entries, user_scope):
"""SetGalleryUserSettings.
[Preview API] Set all setting entries for the given user/all-users scope
:param {object} entries: A key-value pair of all settings that need to be set
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
content = self._serialize.body(entries, '{object}')
self._send(http_method='PATCH',
location_id='9b75ece3-7960-401c-848b-148ac01ca350',
version='4.0-preview.1',
route_values=route_values,
content=content)
def generate_key(self, key_type, expire_current_seconds=None):
"""GenerateKey.
[Preview API]
:param str key_type:
:param int expire_current_seconds:
"""
route_values = {}
if key_type is not None:
route_values['keyType'] = self._serialize.url('key_type', key_type, 'str')
query_parameters = {}
if expire_current_seconds is not None:
query_parameters['expireCurrentSeconds'] = self._serialize.query('expire_current_seconds', expire_current_seconds, 'int')
self._send(http_method='POST',
location_id='92ed5cf4-c38b-465a-9059-2f2fb7c624b5',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
def get_signing_key(self, key_type):
"""GetSigningKey.
[Preview API]
:param str key_type:
:rtype: str
"""
route_values = {}
if key_type is not None:
route_values['keyType'] = self._serialize.url('key_type', key_type, 'str')
response = self._send(http_method='GET',
location_id='92ed5cf4-c38b-465a-9059-2f2fb7c624b5',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('str', response)
def update_extension_statistics(self, extension_statistics_update, publisher_name, extension_name):
"""UpdateExtensionStatistics.
[Preview API]
:param :class:`<ExtensionStatisticUpdate> <gallery.v4_0.models.ExtensionStatisticUpdate>` extension_statistics_update:
:param str publisher_name:
:param str extension_name:
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
content = self._serialize.body(extension_statistics_update, 'ExtensionStatisticUpdate')
self._send(http_method='PATCH',
location_id='a0ea3204-11e9-422d-a9ca-45851cc41400',
version='4.0-preview.1',
route_values=route_values,
content=content)
def get_extension_daily_stats(self, publisher_name, extension_name, days=None, aggregate=None, after_date=None):
"""GetExtensionDailyStats.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param int days:
:param str aggregate:
:param datetime after_date:
:rtype: :class:`<ExtensionDailyStats> <gallery.v4_0.models.ExtensionDailyStats>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
query_parameters = {}
if days is not None:
query_parameters['days'] = self._serialize.query('days', days, 'int')
if aggregate is not None:
query_parameters['aggregate'] = self._serialize.query('aggregate', aggregate, 'str')
if after_date is not None:
query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
response = self._send(http_method='GET',
location_id='ae06047e-51c5-4fb4-ab65-7be488544416',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ExtensionDailyStats', response)
def get_extension_daily_stats_anonymous(self, publisher_name, extension_name, version):
"""GetExtensionDailyStatsAnonymous.
[Preview API] This route/location id only supports HTTP POST anonymously, so that the page view daily stat can be incremented from Marketplace client. Trying to call GET on this route should result in an exception. Without this explicit implementation, calling GET on this public route invokes the above GET implementation GetExtensionDailyStats.
:param str publisher_name: Name of the publisher
:param str extension_name: Name of the extension
:param str version: Version of the extension
:rtype: :class:`<ExtensionDailyStats> <gallery.v4_0.models.ExtensionDailyStats>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
response = self._send(http_method='GET',
location_id='4fa7adb6-ca65-4075-a232-5f28323288ea',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('ExtensionDailyStats', response)
def increment_extension_daily_stat(self, publisher_name, extension_name, version, stat_type):
"""IncrementExtensionDailyStat.
[Preview API] Increments a daily statistic associated with the extension
:param str publisher_name: Name of the publisher
:param str extension_name: Name of the extension
:param str version: Version of the extension
:param str stat_type: Type of stat to increment
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
query_parameters = {}
if stat_type is not None:
query_parameters['statType'] = self._serialize.query('stat_type', stat_type, 'str')
self._send(http_method='POST',
location_id='4fa7adb6-ca65-4075-a232-5f28323288ea',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
def get_verification_log(self, publisher_name, extension_name, version, **kwargs):
"""GetVerificationLog.
[Preview API]
:param str publisher_name:
:param str extension_name:
:param str version:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if version is not None:
route_values['version'] = self._serialize.url('version', version, 'str')
response = self._send(http_method='GET',
location_id='c5523abe-b843-437f-875b-5833064efe4d',
version='4.0-preview.1',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
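# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated client). A minimal, hypothetical
# example of the review and download APIs above; it assumes an already
# authenticated GalleryClient instance named `gallery_client`, and the
# publisher/extension names are placeholders. Attribute names on the returned
# objects follow the gallery.v4_0 models referenced in the docstrings.
#
# reviews = gallery_client.get_reviews('example-publisher', 'example-extension', count=10)
# for review in reviews.reviews:
#     print(review.title, review.rating)
#
# Binary endpoints such as get_verification_log return an iterator of byte
# chunks that can be streamed to disk:
#
# with open('verification.log', 'wb') as log_file:
#     for chunk in gallery_client.get_verification_log('example-publisher',
#                                                      'example-extension', '1.0.0'):
#         log_file.write(chunk)
# ---------------------------------------------------------------------------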
| 53.776488
| 355
| 0.606418
|
4a062249caf6cf0bdd282a1a8bed382beb55f317
| 610
|
py
|
Python
|
iwant/core/engine/monitor/callbacks.py
|
nirvik/iWant
|
503b7289cac4056cb20cc156b746370def5e8e04
|
[
"MIT"
] | 323
|
2017-06-24T09:31:52.000Z
|
2022-02-25T03:10:00.000Z
|
iwant/core/engine/monitor/callbacks.py
|
crypticterminal/iWant
|
503b7289cac4056cb20cc156b746370def5e8e04
|
[
"MIT"
] | 7
|
2017-06-29T12:44:34.000Z
|
2021-06-04T23:37:19.000Z
|
iwant/core/engine/monitor/callbacks.py
|
crypticterminal/iWant
|
503b7289cac4056cb20cc156b746370def5e8e04
|
[
"MIT"
] | 28
|
2017-06-30T01:10:04.000Z
|
2021-03-10T02:38:44.000Z
|
from iwant.core.protocols import FilemonitorClientFactory
from iwant.core.config import SERVER_DAEMON_HOST, SERVER_DAEMON_PORT
from twisted.internet import reactor
from iwant.core.constants import INDEXED, FILE_SYS_EVENT
def filechangeCB(updates):
if len(updates['ADD']) != 0 or len(updates['DEL']) != 0:
factory = FilemonitorClientFactory(FILE_SYS_EVENT, updates)
reactor.connectTCP(SERVER_DAEMON_HOST, SERVER_DAEMON_PORT, factory)
def fileindexedCB(files):
factory = FilemonitorClientFactory(INDEXED, files)
reactor.connectTCP(SERVER_DAEMON_HOST, SERVER_DAEMON_PORT, factory)
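# Illustrative direct invocation (hypothetical payload; in iWant these
# callbacks are normally handed to the file-system monitor rather than called
# by hand). The 'ADD'/'DEL' keys mirror the check in filechangeCB above.
#
# filechangeCB({'ADD': ['/share/new_file.mkv'], 'DEL': []})
# reactor.run()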
| 38.125
| 75
| 0.793443
|
4a06225b63aaf9906a84ad77df97e4e6bd7f55d3
| 4,567
|
py
|
Python
|
qa/rpc-tests/netutil.py
|
TrueDividendCrypto/truecrypto-oss
|
d6dda1a4f467b772cccece1b3915d3e391e9809f
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/netutil.py
|
TrueDividendCrypto/truecrypto-oss
|
d6dda1a4f467b772cccece1b3915d3e391e9809f
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/netutil.py
|
TrueDividendCrypto/truecrypto-oss
|
d6dda1a4f467b772cccece1b3915d3e391e9809f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2014-2019 The Bitcoin Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
import binascii
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = binascii.unhexlify(host)
host_out = ''
for x in range(0, len(host)/4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
'''
Function to return a list with status of tcp connections at linux systems
To get pid of all network process running on system, you must run this script
as superuser
'''
with open('/proc/net/'+typ,'r') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', '\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split('\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return binascii.hexlify(bytearray(addr))
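# Minimal self-check sketch (assumes a Linux host and Python 2, like the rest
# of this module); the expected hex strings follow directly from addr_to_hex.
if __name__ == '__main__':
    assert addr_to_hex('127.0.0.1') == '7f000001'
    assert addr_to_hex('::1') == '0' * 30 + '01'
    print(all_interfaces())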
| 32.621429
| 113
| 0.599956
|
4a0622a55249a81bccc64f18a69cc77c6fe70d69
| 2,196
|
py
|
Python
|
toyClassification/SGHMC-64/datasets.py
|
nicolasrosa-forks/evaluating_bdl
|
2973b0d018551de0c9f087e2ae4e6b2c22f2ce3c
|
[
"MIT"
] | null | null | null |
toyClassification/SGHMC-64/datasets.py
|
nicolasrosa-forks/evaluating_bdl
|
2973b0d018551de0c9f087e2ae4e6b2c22f2ce3c
|
[
"MIT"
] | null | null | null |
toyClassification/SGHMC-64/datasets.py
|
nicolasrosa-forks/evaluating_bdl
|
2973b0d018551de0c9f087e2ae4e6b2c22f2ce3c
|
[
"MIT"
] | null | null | null |
# code-checked
# server-checked
import torch
import torch.utils.data
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pickle
class ToyDataset(torch.utils.data.Dataset):
def __init__(self):
self.examples = []
with open("/workspace/evaluating_bdl/toyClassification/x.pkl", "rb") as file: # (needed for python3)
x = pickle.load(file) # (shape: (2000, 2))
with open("/workspace/evaluating_bdl/toyClassification/y.pkl", "rb") as file: # (needed for python3)
y = pickle.load(file) # (shape: (2000, ))
x_1_train = []
x_2_train = []
y_train = []
for i in range(x.shape[0]):
if x[i, 0] > 0:
x_1_train.append(x[i, 0])
x_2_train.append(x[i, 1])
y_train.append(y[i])
y_train = np.array(y_train)
x_train = np.zeros((len(y_train), 2), dtype=np.float32)
x_train[:, 0] = np.array(x_1_train)
x_train[:, 1] = np.array(x_2_train)
x_train_false = x_train[y_train == 0] # (shape: (num_false, 2))
x_train_true = x_train[y_train == 1] # (shape: (num_true, 2))
print ("num_false: %d" % x_train_false.shape[0])
print ("num_true: %d" % x_train_true.shape[0])
plt.figure(1)
plt.plot(x_train_false[:, 0], x_train_false[:, 1], "r.")
plt.plot(x_train_true[:, 0], x_train_true[:, 1], "b.")
plt.ylabel("x_2")
plt.xlabel("x_1")
plt.xlim([-3, 3])
plt.ylim([-3, 3])
plt.savefig("/workspace/evaluating_bdl/toyClassification/SGHMC-64/training_data.png")
plt.close(1)
for i in range(x_train.shape[0]):
example = {}
example["x"] = x_train[i]
example["y"] = y_train[i]
self.examples.append(example)
self.num_examples = len(self.examples)
def __getitem__(self, index):
example = self.examples[index]
x = example["x"]
y = example["y"]
return (x, y)
def __len__(self):
return self.num_examples
#_ = ToyDataset()
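# Sketch of typical use (commented out, like the line above): wrap the dataset
# in a standard PyTorch DataLoader for mini-batch training. The batch size is
# an arbitrary example value.
# loader = torch.utils.data.DataLoader(ToyDataset(), batch_size=32, shuffle=True)
# for x_batch, y_batch in loader:
#     pass  # forward/backward pass would go here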
| 29.28
| 108
| 0.575137
|
4a06232bf7a2b6ec78634f4d0539bd1be322f251
| 112
|
py
|
Python
|
python/2914_Copyright.py
|
anothel/BOJ
|
cfc693322e609d319aaa8705d4375d098c034b76
|
[
"MIT"
] | null | null | null |
python/2914_Copyright.py
|
anothel/BOJ
|
cfc693322e609d319aaa8705d4375d098c034b76
|
[
"MIT"
] | null | null | null |
python/2914_Copyright.py
|
anothel/BOJ
|
cfc693322e609d319aaa8705d4375d098c034b76
|
[
"MIT"
] | null | null | null |
def main():
A, I = map(int, input().split())
print((I - 1) * A + 1)
if __name__ == "__main__":
main()
| 14
| 34
| 0.5
|
4a0623e34bbd6e03ab008f5f4496c6dc47d1e099
| 922
|
py
|
Python
|
app/ch16_mongodb/final/pypi_org/nosql/releases.py
|
tbensonwest/data-driven-web-apps-with-flask
|
be025c1c0190419019924f7516f49b3b8452cdf8
|
[
"MIT"
] | 496
|
2019-07-03T05:13:24.000Z
|
2022-03-27T01:15:10.000Z
|
app/ch16_mongodb/final/pypi_org/nosql/releases.py
|
tbensonwest/data-driven-web-apps-with-flask
|
be025c1c0190419019924f7516f49b3b8452cdf8
|
[
"MIT"
] | 20
|
2019-07-07T22:09:49.000Z
|
2021-12-28T03:03:09.000Z
|
app/ch16_mongodb/final/pypi_org/nosql/releases.py
|
tbensonwest/data-driven-web-apps-with-flask
|
be025c1c0190419019924f7516f49b3b8452cdf8
|
[
"MIT"
] | 562
|
2019-07-03T14:35:21.000Z
|
2022-03-31T06:23:58.000Z
|
import datetime
import mongoengine
class Release(mongoengine.Document):
package_id = mongoengine.StringField()
major_ver = mongoengine.IntField()
minor_ver = mongoengine.IntField()
build_ver = mongoengine.IntField()
created_date = mongoengine.DateTimeField(default=datetime.datetime.now)
comment = mongoengine.StringField()
url = mongoengine.StringField()
size = mongoengine.IntField()
meta = {
'db_alias': 'core',
'collection': 'releases',
'indexes': [
'created_date',
'package_id',
'major_ver',
'minor_ver',
'build_ver',
{'fields': ['major_ver', 'minor_ver', 'build_ver']},
{'fields': ['-major_ver', '-minor_ver', '-build_ver']},
]
}
@property
def version_text(self):
return '{}.{}.{}'.format(self.major_ver, self.minor_ver, self.build_ver)
| 26.342857
| 80
| 0.597614
|
4a062527cea05ca0d2778e1faf4bd710c8d9380a
| 1,371
|
py
|
Python
|
.transform/keras2tf.py
|
klrc/food-segmentation
|
f661f53120bdfe3d94b72b91a33a2286c95ed824
|
[
"MIT"
] | null | null | null |
.transform/keras2tf.py
|
klrc/food-segmentation
|
f661f53120bdfe3d94b72b91a33a2286c95ed824
|
[
"MIT"
] | null | null | null |
.transform/keras2tf.py
|
klrc/food-segmentation
|
f661f53120bdfe3d94b72b91a33a2286c95ed824
|
[
"MIT"
] | null | null | null |
from keras.models import load_model
import tensorflow as tf
import os
import os.path as osp
from keras import backend as K
# Path parameters
input_path = 'input path'
weight_file = 'weight.h5'
weight_file_path = osp.join(input_path, weight_file)
output_graph_name = weight_file[:-3] + '.pb'
# Conversion function: freeze a Keras .h5 model into a TensorFlow .pb graph
def h5_to_pb(h5_model, output_dir, model_name, out_prefix="output_", log_tensorboard=True):
if osp.exists(output_dir) == False:
os.mkdir(output_dir)
out_nodes = []
for i in range(len(h5_model.outputs)):
out_nodes.append(out_prefix + str(i + 1))
tf.identity(h5_model.output[i], out_prefix + str(i + 1))
sess = K.get_session()
from tensorflow.python.framework import graph_util, graph_io
init_graph = sess.graph.as_graph_def()
main_graph = graph_util.convert_variables_to_constants(
sess, init_graph, out_nodes)
graph_io.write_graph(main_graph, output_dir,
name=model_name, as_text=False)
if log_tensorboard:
from tensorflow.python.tools import import_pb_to_tensorboard
import_pb_to_tensorboard.import_to_tensorboard(
osp.join(output_dir, model_name), output_dir)
# Output directory
output_dir = osp.join(os.getcwd(), "trans_model")
# Load the Keras model
h5_model = load_model(weight_file_path)
h5_to_pb(h5_model, output_dir=output_dir, model_name=output_graph_name)
print('model saved')
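# Optional sanity check (a sketch, assuming the TensorFlow 1.x API used above):
# reload the frozen graph to confirm the exported .pb file parses correctly.
# with tf.gfile.GFile(osp.join(output_dir, output_graph_name), 'rb') as f:
#     graph_def = tf.GraphDef()
#     graph_def.ParseFromString(f.read())
# tf.import_graph_def(graph_def, name='')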
| 34.275
| 91
| 0.726477
|
4a0625707b6b0d328dad1814d646016888e6f695
| 1,178
|
py
|
Python
|
scrapy_doubanmovie/scrapy_doubanmovie/pipelines.py
|
davidvivi/you-need-Python
|
0a9e1fcb1f1439006304ed57771e5e8ff3a28554
|
[
"MIT"
] | 4
|
2018-06-12T01:05:13.000Z
|
2019-12-13T10:10:15.000Z
|
scrapy_doubanmovie/scrapy_doubanmovie/pipelines.py
|
davidvivi/you-need-Python
|
0a9e1fcb1f1439006304ed57771e5e8ff3a28554
|
[
"MIT"
] | 8
|
2021-03-18T21:26:26.000Z
|
2022-03-11T23:33:18.000Z
|
scrapy_doubanmovie/scrapy_doubanmovie/pipelines.py
|
davidvivi/you-need-Python
|
0a9e1fcb1f1439006304ed57771e5e8ff3a28554
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
from . import settings
class ScrapyDoubanmoviePipeline(object):
def __init__(self):
self.connect = pymysql.connect(
host=settings.MYSQL_HOST,
db=settings.MYSQL_DBNAME,
user=settings.MYSQL_USER,
password=settings.MYSQL_PASSWD,
charset='utf8',
use_unicode=True
)
self.cursor = self.connect.cursor()
def process_item(self, item, spider):
try:
self.cursor.execute(
"""
insert into doubantop250(title,introduce,star,evaluate,quote) value (%s,%s,%s,%s,%s)
""", (
item['title'],
item['introduce'],
item['star'],
item['evaluate'],
item['quote']
)
)
self.connect.commit()
except Exception as e:
print("错误信息为:" + str(e))
return item
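# As the header comment notes, the pipeline must be enabled in settings.py.
# A minimal sketch (the priority value 300 is an arbitrary example):
#
# ITEM_PIPELINES = {
#     'scrapy_doubanmovie.pipelines.ScrapyDoubanmoviePipeline': 300,
# }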
| 29.45
| 104
| 0.52292
|
4a0626e89bc46c8ad5430ed182a748b308ad744c
| 3,023
|
py
|
Python
|
FileI:O/file_input.py
|
mcorley-gba/IntroCS21-22
|
a823e17f2cb618be0e67468cb15f48873ae85152
|
[
"MIT"
] | null | null | null |
FileI:O/file_input.py
|
mcorley-gba/IntroCS21-22
|
a823e17f2cb618be0e67468cb15f48873ae85152
|
[
"MIT"
] | null | null | null |
FileI:O/file_input.py
|
mcorley-gba/IntroCS21-22
|
a823e17f2cb618be0e67468cb15f48873ae85152
|
[
"MIT"
] | null | null | null |
#File Input.py
#We will experiment with file inputs here.
#Saved in pi_digits.txt are the first 30 decimal places of pi.
#1. Open the file.
#2. Read the entire contents
#3. Do something (print)
#with open('pi_digits.txt') as file_object:
# contents = file_object.read()
#more commands here
#print(contents.rstrip())
#open keyword needs one argument: filename as a string
#python will look for this file in the same directory as the script.
#file_object is a local python variable where the file contents are located
#with is a keyword that begins a block of code for working with the file and
#it will automatically close the file when the block is complete
#Data is in a sub-directory
with open('data_files/pi_digits.txt') as file_object:
contents = file_object.read()
print(contents)
#with open('subdir1/subdir2/subdir3.../pi_digits.txt')
#In MacOS and Linux operating systems, filepaths are given with the forward slash /
#In windows, filepaths are given with a backslash: C:...\my_file.docx
# If you use the backslash in python, you get an error b/c backslash is special:
# C:\path\to\my_file.txt
#For python commands -- always use forward slash, even on windows.
#The path 'data_files/pi_digits.txt' is called a relative filepath -- it is given in relation
#to the current file position.
#Python can also take 'absolute filepaths' -- an exact description from the top down of
#where something is on the computer
#Absolute filepaths are normally longer than relative filepaths -- store them in a string
#before giving the open command.
file_path = '/Users/michaelcorley/Movies/pi_digits.txt'
with open(file_path) as file_object:
contents = file_object.read()
print(contents)
#with open('pi_digits.txt') as file_object:
# contents = file_object.read()
#print(contents)
#to read line by line, we will use for loops:
with open(file_path) as file_object:
for line in file_object:
print(line.rstrip())
#Make a list from the lines of a file:
with open(file_path) as file_object:
lines = file_object.readlines()
pi_string='' #empty string for storing all first 30 decimal places
for line in lines:
pi_string += line.strip()
print(pi_string)
print(len(pi_string))
#When Python reads text files, all the data is read as string data. Convert using int(), eval(), or float()
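#For example (illustrative): convert the assembled string to a float.
pi_value = float(pi_string)
print(pi_value) #prints 3.141592653589793, the nearest float value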
#Reading Large Data Files
file_path = 'data_files/pi_million_digits.txt'
with open(file_path) as file_object:
lines = file_object.readlines()
pi_string = ''
for line in lines:
pi_string += line.strip()
print(f"{pi_string[:52]} ...")
print(len(pi_string))
#Is your birthday in pi
birthday = input("Enter your birthday in the form mmddyy: ")
if birthday in pi_string:
print("Your birthday appear in the first million decimal points of pi!")
else:
print("Your birthday does not appear in the first million decimal points of pi.")
#Python has no limit to how much data it can process at once. The only limits will come from
#Your own system's memory and storage.
| 30.846939
| 105
| 0.742971
|